diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e4cc5db1..49aa4da1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -29,6 +29,14 @@ xenomai2: - ./configure --disable-8139too --with-linux-dir=/usr/src/linux-obj/x86_64/0318xenomai2 --enable-rtdm --with-xenomai-dir=/usr - make -j8 all modules +kernel 3.4: + stage: build + image: registry.gitlab.com/etherlab.org/build-container-factory/ethercat-kernels:k3.4-rt145 + script: + - ./bootstrap + - ./configure --enable-8139too --enable-e100 --enable-e1000 --enable-e1000e --enable-r8169 --with-linux-dir=/usr/src/linux-obj/x86_64/0304rt + - make -j8 all modules + xenomai3: stage: build image: registry.gitlab.com/etherlab.org/build-container-factory/ethercat-kernels:xenomai3 diff --git a/NEWS b/NEWS index d4bb398d..24488b22 100644 --- a/NEWS +++ b/NEWS @@ -10,6 +10,7 @@ Changes in 1.6.0: * Dropped support for kernels < 3.0. * Allow to query the scanning progress via API. * Added EoE set IP command via command-line-tool. +* Changed the default AL state change timeout from 5 to 10 s. Changes since 1.5.2: diff --git a/configure.ac b/configure.ac index 7a9d22d1..04b78bf4 100644 --- a/configure.ac +++ b/configure.ac @@ -66,7 +66,7 @@ AC_ARG_ENABLE([kernel], ;; no) enablekernel=0 ;; - *) AC_MSG_ERROR([Invalid value for --enable-generic]) + *) AC_MSG_ERROR([Invalid value for --enable-kernel]) ;; esac ], @@ -163,6 +163,37 @@ AC_SUBST(INSTALL_MOD_DIR,[$moddir]) AC_MSG_CHECKING([for Linux modules installation directory]) AC_MSG_RESULT([$INSTALL_MOD_DIR]) +#----------------------------------------------------------------------------- +# Native driver resource (de)allocation verifying +#----------------------------------------------------------------------------- + +AC_MSG_CHECKING([whether to verify native driver resources]) + +AC_ARG_ENABLE([driver-resource-verifying], + AS_HELP_STRING([--enable-driver-resource-verifying], + [Verify resource (de-)allocation in native drivers]), + [ + case "${enableval}" in + yes) enableverifying=1 + ;; + no) enableverifying=0 + ;; + *) AC_MSG_ERROR([Invalid value for --enable-driver-resource-verifying]) + ;; + esac + ], + [enableverifying=0] +) + +if test "x$enableverifying" = "x1"; then + AC_MSG_RESULT([yes]) +else + AC_MSG_RESULT([no]) +fi + +AM_CONDITIONAL(ENABLE_DRIVER_RESOURCE_VERIFYING, test "x$enableverifying" = "x1") +AC_SUBST(ENABLE_DRIVER_RESOURCE_VERIFYING,[$enableverifying]) + #----------------------------------------------------------------------------- # Generic Ethernet driver #----------------------------------------------------------------------------- @@ -633,6 +664,80 @@ fi AC_SUBST(KERNEL_R8169,[$kernel_r8169]) +#----------------------------------------------------------------------------- +# stmmac-pci and dwmac-intel driver +#----------------------------------------------------------------------------- + +enablestmmac=0 + +AC_ARG_ENABLE([stmmac-pci], + AS_HELP_STRING([--enable-stmmac-pci], + [Enable stmmac driver]), + [ + case "${enableval}" in + yes) enablestmmacpci=1 + enablestmmac=1 + ;; + no) enablestmmacpci=0 + ;; + *) AC_MSG_ERROR([Invalid value for --enable-stmmac]) + ;; + esac + ], + [enablestmmacpci=0] # disabled by default +) + +AM_CONDITIONAL(ENABLE_STMMACPCI, test "x$enablestmmacpci" = "x1") +AC_SUBST(ENABLE_STMMACPCI,[$enablestmmacpci]) + +AC_ARG_ENABLE([dwmac-intel], + AS_HELP_STRING([--enable-dwmac-intel], + [Enable stmmac driver]), + [ + case "${enableval}" in + yes) enabledwmacintel=1 + enablestmmac=1 + ;; + no) enabledwmacintel=0 + ;; + *) AC_MSG_ERROR([Invalid value for 
--enable-stmmac]) + ;; + esac + ], + [enabledwmacintel=0] # disabled by default +) + +AM_CONDITIONAL(ENABLE_DWMACINTEL, test "x$enabledwmacintel" = "x1") +AC_SUBST(ENABLE_DWMACINTEL,[$enabledwmacintel]) + +AM_CONDITIONAL(ENABLE_STMMAC, test "x$enablestmmac" = "x1") +AC_SUBST(ENABLE_STMMAC, [$enablestmmac]) + +AC_ARG_WITH([stmmac-kernel], + AC_HELP_STRING( + [--with-stmmac-kernel=], + [stmmac kernel (only if differing)] + ), + [ + kernelstmmac=[$withval] + ], + [ + kernelstmmac=$linuxversion + ] +) + +if test "x${enablestmmac}" = "x1"; then + AC_MSG_CHECKING([for kernel for stmmac driver]) + + if test ! -f "${srcdir}/devices/stmmac/stmmac-${kernelstmmac}-orig.h"; then + AC_MSG_ERROR([kernel $kernelstmmac not available for stmmac driver!]) + fi + + AC_MSG_RESULT([$kernelstmmac]) +fi + +AC_SUBST(KERNEL_STMMAC,[$kernelstmmac]) + #----------------------------------------------------------------------------- # CCAT driver #----------------------------------------------------------------------------- @@ -688,7 +793,7 @@ AC_MSG_CHECKING([for RTAI path]) if test -z "${rtaidir}"; then AC_MSG_RESULT([not specified.]) else - if test \! -r ${rtaidir}/include/rtai.h; then + if test \! -x ${rtaidir}/bin/rtai-config; then AC_MSG_ERROR([no RTAI installation found in ${rtaidir}!]) fi AC_MSG_RESULT([$rtaidir]) @@ -696,6 +801,7 @@ else rtai_lxrt_cflags=`$rtaidir/bin/rtai-config --lxrt-cflags` rtai_lxrt_ldflags=`$rtaidir/bin/rtai-config --lxrt-ldflags` rtai_module_dir=`$rtaidir/bin/rtai-config --module-dir` + rtai_kernel_cflags=`$rtaidir/bin/rtai-config --kernel-cflags` fi AC_SUBST(RTAI_DIR,[$rtaidir]) @@ -705,6 +811,7 @@ AC_SUBST(ENABLE_RTAI,[$rtai]) AC_SUBST(RTAI_LXRT_CFLAGS,[$rtai_lxrt_cflags]) AC_SUBST(RTAI_LXRT_LDFLAGS,[$rtai_lxrt_ldflags]) AC_SUBST(RTAI_MODULE_DIR,[$rtai_module_dir]) +AC_SUBST(RTAI_KERNEL_CFLAGS,[$rtai_kernel_cflags]) #----------------------------------------------------------------------------- # Xenomai path (optional) @@ -1263,6 +1370,8 @@ AC_CONFIG_FILES([ devices/igc/Makefile devices/r8169/Kbuild devices/r8169/Makefile + devices/stmmac/Kbuild + devices/stmmac/Makefile ethercat.spec examples/Kbuild examples/Makefile diff --git a/devices/8139too-5.14-ethercat.c b/devices/8139too-5.14-ethercat.c index 3787ecd9..f045bb96 100644 --- a/devices/8139too-5.14-ethercat.c +++ b/devices/8139too-5.14-ethercat.c @@ -149,6 +149,17 @@ #include "../globals.h" #include "ecdev.h" +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + #define RTL8139_DRIVER_NAME DRV_NAME \ " EtherCAT-capable Fast Ethernet driver " \ DRV_VERSION ", master " EC_MASTER_VERSION @@ -662,7 +673,7 @@ struct rtl8139_private { ec_device_t *ecdev; }; -MODULE_AUTHOR("Florian Pose "); +MODULE_AUTHOR("Florian Pose "); MODULE_DESCRIPTION("RealTek RTL-8139 EtherCAT driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(EC_MASTER_VERSION); @@ -2383,8 +2394,11 @@ static int rtl8139_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(dev, addr->sa_data); +#else memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - +#endif spin_lock_irq(&tp->lock); RTL_W8_F(Cfg9346, Cfg9346_Unlock); diff --git a/devices/8139too-5.15-ethercat.c b/devices/8139too-5.15-ethercat.c index 063f78e4..bf6ad3ef 100644 --- a/devices/8139too-5.15-ethercat.c +++ b/devices/8139too-5.15-ethercat.c @@ -662,7 +662,7 @@ struct 
rtl8139_private { ec_device_t *ecdev; }; -MODULE_AUTHOR("Florian Pose "); +MODULE_AUTHOR("Florian Pose "); MODULE_DESCRIPTION("RealTek RTL-8139 EtherCAT driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(EC_MASTER_VERSION); diff --git a/devices/8139too-6.1-ethercat.c b/devices/8139too-6.1-ethercat.c index 4871292a..71759cdf 100644 --- a/devices/8139too-6.1-ethercat.c +++ b/devices/8139too-6.1-ethercat.c @@ -660,9 +660,18 @@ struct rtl8139_private { unsigned int regs_len; unsigned long fifo_copy_timeout; - ec_device_t *ecdev; + ec_device_t *ecdev_; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct rtl8139_private *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + MODULE_AUTHOR("Florian Pose "); MODULE_DESCRIPTION("RealTek RTL-8139 EtherCAT driver"); MODULE_LICENSE("GPL"); @@ -1083,9 +1092,10 @@ static int rtl8139_init_one(struct pci_dev *pdev, /* dev is fully set up and ready to use now */ // offer device to EtherCAT master module - tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_ = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_initialized = true; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { pr_debug("about to register device named %s (%p)...\n", dev->name, dev); i = register_netdev (dev); @@ -1158,10 +1168,10 @@ static int rtl8139_init_one(struct pci_dev *pdev, if (rtl_chip_info[tp->chipset].flags & HasHltClk) RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ - if (tp->ecdev) { - i = ecdev_open(tp->ecdev); + if (get_ecdev(tp)) { + i = ecdev_open(get_ecdev(tp)); if (i) { - ecdev_withdraw(tp->ecdev); + ecdev_withdraw(get_ecdev(tp)); goto err_out; } } @@ -1182,9 +1192,9 @@ static void rtl8139_remove_one(struct pci_dev *pdev) assert (dev != NULL); - if (tp->ecdev) { - ecdev_close(tp->ecdev); - ecdev_withdraw(tp->ecdev); + if (get_ecdev(tp)) { + ecdev_close(get_ecdev(tp)); + ecdev_withdraw(get_ecdev(tp)); } else { cancel_delayed_work_sync(&tp->thread); @@ -1393,7 +1403,7 @@ static int rtl8139_open (struct net_device *dev) const int irq = tp->pci_dev->irq; int retval; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); if (retval) return retval; @@ -1404,7 +1414,7 @@ static int rtl8139_open (struct net_device *dev) tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, &tp->rx_ring_dma, GFP_KERNEL); if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { - if (!tp->ecdev) { + if (!get_ecdev(tp)) { free_irq(irq, dev); } @@ -1426,7 +1436,7 @@ static int rtl8139_open (struct net_device *dev) rtl8139_init_ring (dev); rtl8139_hw_start (dev); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { netif_start_queue (dev); } @@ -1437,7 +1447,7 @@ static int rtl8139_open (struct net_device *dev) irq, RTL_R8 (MediaStatus), tp->mii.full_duplex ? "full" : "half"); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { rtl8139_start_thread(tp); } @@ -1449,10 +1459,10 @@ static void rtl_check_media (struct net_device *dev, unsigned int init_media) { struct rtl8139_private *tp = netdev_priv(dev); - if (tp->ecdev) { + if (get_ecdev(tp)) { void __iomem *ioaddr = tp->mmio_addr; u16 state = RTL_R16(BasicModeStatus) & BMSR_LSTATUS; - ecdev_set_link(tp->ecdev, state ? 1 : 0); + ecdev_set_link(get_ecdev(tp), state ? 
1 : 0); } else { if (tp->phys[0] >= 0) { @@ -1523,7 +1533,7 @@ static void rtl8139_hw_start (struct net_device *dev) if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb))) RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { /* Enable all known interrupts by setting the interrupt mask. */ RTL_W16 (IntrMask, rtl8139_intr_mask); } @@ -1770,7 +1780,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) if (tmp8 & CmdTxEnb) RTL_W8 (ChipCmd, CmdRxEnb); - if (tp->ecdev) { + if (get_ecdev(tp)) { rtl8139_tx_clear (tp); rtl8139_hw_start (dev); } @@ -1798,7 +1808,7 @@ static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue) struct rtl8139_private *tp = netdev_priv(dev); tp->watchdog_fired = 1; - if (!tp->ecdev && !tp->have_thread) { + if (!get_ecdev(tp) && !tp->have_thread) { INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); schedule_delayed_work(&tp->thread, next_tick); } @@ -1821,18 +1831,18 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, if (len < ETH_ZLEN) memset(tp->tx_buf[entry], 0, ETH_ZLEN); skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { dev_kfree_skb_any(skb); } } else { - if (!tp->ecdev) { + if (!get_ecdev(tp)) { dev_kfree_skb_any(skb); } dev->stats.tx_dropped++; return NETDEV_TX_OK; } - if (!tp->ecdev) { + if (!get_ecdev(tp)) { spin_lock_irqsave(&tp->lock, flags); } /* @@ -1846,7 +1856,7 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, tp->cur_tx++; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) netif_stop_queue (dev); spin_unlock_irqrestore(&tp->lock, flags); @@ -1925,7 +1935,7 @@ static void rtl8139_tx_interrupt (struct net_device *dev, if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; mb(); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { netif_wake_queue (dev); } } @@ -2061,7 +2071,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, RTL_R16 (RxBufAddr), RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd)); - while ((tp->ecdev || netif_running(dev)) + while ((get_ecdev(tp) || netif_running(dev)) && received < budget && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) { u32 ring_offset = cur_rx % RX_BUF_LEN; @@ -2079,7 +2089,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, else pkt_size = rx_size; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { if (netif_msg_rx_status(tp)) pr_debug("%s: rtl8139_rx() status %4.4x, size %4.4x," " cur %4.4x.\n", dev->name, rx_status, @@ -2151,8 +2161,8 @@ no_early_rx: } keep_pkt: - if (tp->ecdev) { - ecdev_receive(tp->ecdev, &rx_ring[ring_offset + 4], pkt_size); + if (get_ecdev(tp)) { + ecdev_receive(get_ecdev(tp), &rx_ring[ring_offset + 4], pkt_size); } else { /* Malloc up new buffer, compatible with net-2e. */ @@ -2290,7 +2300,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) int link_changed = 0; /* avoid bogus "uninit" warning */ int handled = 0; - if (tp->ecdev) { + if (get_ecdev(tp)) { status = RTL_R16 (IntrStatus); } else { @@ -2308,7 +2318,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) if (unlikely(status == 0xFFFF)) goto out; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { /* close possible race's with dev_close */ if (unlikely(!netif_running(dev))) { RTL_W16 (IntrMask, 0); @@ -2328,7 +2338,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) /* Receive packets are processed by poll routine. If not running start it now. 
*/ if (status & RxAckBits){ - if (tp->ecdev) { + if (get_ecdev(tp)) { /* EtherCAT device: Just receive all frames */ rtl8139_rx(dev, tp, 100); // FIXME } else { @@ -2351,7 +2361,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) RTL_W16 (IntrStatus, TxErr); } out: - if (!tp->ecdev) { + if (!get_ecdev(tp)) { spin_unlock (&tp->lock); } @@ -2405,7 +2415,7 @@ static int rtl8139_close (struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; unsigned long flags = 0; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { netif_stop_queue(dev); napi_disable(&tp->napi); @@ -2426,7 +2436,7 @@ static int rtl8139_close (struct net_device *dev) dev->stats.rx_missed_errors += RTL_R32 (RxMissed); RTL_W32 (RxMissed, 0); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { spin_unlock_irqrestore (&tp->lock, flags); free_irq(tp->pci_dev->irq, dev); @@ -2654,7 +2664,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) struct rtl8139_private *tp = netdev_priv(dev); int rc; - if (tp->ecdev || !netif_running(dev)) + if (get_ecdev(tp) || !netif_running(dev)) return -EINVAL; spin_lock_irq(&tp->lock); @@ -2673,7 +2683,7 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) unsigned long flags; unsigned int start; - if (tp->ecdev || netif_running(dev)) { + if (get_ecdev(tp) || netif_running(dev)) { spin_lock_irqsave (&tp->lock, flags); dev->stats.rx_missed_errors += RTL_R32 (RxMissed); RTL_W32 (RxMissed, 0); @@ -2762,7 +2772,7 @@ static int __maybe_unused rtl8139_suspend(struct device *device) void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; - if (tp->ecdev || !netif_running (dev)) + if (get_ecdev(tp) || !netif_running (dev)) return 0; netif_device_detach (dev); @@ -2787,7 +2797,7 @@ static int __maybe_unused rtl8139_resume(struct device *device) struct net_device *dev = dev_get_drvdata(device); struct rtl8139_private *tp = netdev_priv(dev); - if (tp->ecdev || !netif_running (dev)) + if (get_ecdev(tp) || !netif_running (dev)) return 0; rtl8139_init_ring (dev); diff --git a/devices/Kbuild.in b/devices/Kbuild.in index ce299edf..a46b0410 100644 --- a/devices/Kbuild.in +++ b/devices/Kbuild.in @@ -26,6 +26,11 @@ src := @abs_srcdir@ ccflags-y := -I@abs_top_builddir@ +ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y += -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING +endif + + REV := $(shell if test -s $(src)/../revision; then \ cat $(src)/../revision; \ else \ @@ -88,6 +93,10 @@ else endif endif +ifeq (@ENABLE_STMMAC@,1) + obj-m += stmmac/ +endif + KBUILD_EXTRA_SYMBOLS := \ @abs_top_builddir@/$(LINUX_SYMVERS) \ @abs_top_builddir@/master/$(LINUX_SYMVERS) diff --git a/devices/Makefile.am b/devices/Makefile.am index e812e6b2..e653c468 100644 --- a/devices/Makefile.am +++ b/devices/Makefile.am @@ -28,7 +28,8 @@ SUBDIRS = \ genet \ igb \ igc \ - r8169 + r8169 \ + stmmac # using HEADERS to enable tags target noinst_HEADERS = \ diff --git a/devices/ccat/netdev.c b/devices/ccat/netdev.c index 44e7f669..b122b790 100644 --- a/devices/ccat/netdev.c +++ b/devices/ccat/netdev.c @@ -31,6 +31,17 @@ #define request_dma(X, Y) ((int)(-EINVAL)) #endif +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + #include "module.h" /** @@ -897,7 +908,7 @@ static int ccat_eth_init_netdev(struct ccat_eth_priv *priv) /* init netdev with MAC and stack callbacks */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) 
|| (SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5) u8 mac_addr[ETH_ALEN]; if (priv->netdev->addr_len != ETH_ALEN) diff --git a/devices/create_driver_table.py b/devices/create_driver_table.py index 1940a251..42861ee5 100644 --- a/devices/create_driver_table.py +++ b/devices/create_driver_table.py @@ -28,6 +28,7 @@ from re import compile DRIVER_MAP=( # (subdir, driver name, file prefix) (".", "8139too", "8139too"), + ("stmmac", "dwmac-intel", "dwmac-intel"), (".", "e100", "e100"), ("e1000", "e1000", "e1000_main"), ("e1000e", "e1000e", "netdev"), @@ -36,6 +37,7 @@ DRIVER_MAP=( ("igc", "igc", "igc_main"), (".", "r8169", "r8169"), ("r8169", "r8169", "r8169_main"), + ("stmmac", "stmmac-pci", "stmmac_pci"), ) diff --git a/devices/e100-5.14-ethercat.c b/devices/e100-5.14-ethercat.c index 8480ed92..15b53aa7 100644 --- a/devices/e100-5.14-ethercat.c +++ b/devices/e100-5.14-ethercat.c @@ -2408,7 +2408,11 @@ static int e100_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, addr->sa_data); +#else memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); +#endif e100_exec_cb(nic, NULL, e100_setup_iaaddr); return 0; @@ -3092,7 +3096,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e100_phy_init(nic); +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, (const u8*)nic->eeprom); +#else memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); +#endif if (!is_valid_ether_addr(netdev->dev_addr)) { if (!eeprom_bad_csum_allow) { netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); diff --git a/devices/e100-6.1-ethercat.c b/devices/e100-6.1-ethercat.c index 7bf75bc0..a8c88dd1 100644 --- a/devices/e100-6.1-ethercat.c +++ b/devices/e100-6.1-ethercat.c @@ -646,10 +646,19 @@ struct nic { __le16 eeprom[256]; spinlock_t mdio_lock; const struct firmware *fw; - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct nic *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + static inline void e100_write_flush(struct nic *nic) { /* Flush previous PCI writes through intermediate bridges @@ -661,7 +670,7 @@ static void e100_enable_irq(struct nic *nic) { unsigned long flags; - if (nic->ecdev) + if (get_ecdev(nic)) return; spin_lock_irqsave(&nic->cmd_lock, flags); @@ -674,11 +683,11 @@ static void e100_disable_irq(struct nic *nic) { unsigned long flags = 0; - if (!nic->ecdev) + if (!nic->ecdev_) spin_lock_irqsave(&nic->cmd_lock, flags); iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); e100_write_flush(nic); - if (!nic->ecdev) + if (!nic->ecdev_) spin_unlock_irqrestore(&nic->cmd_lock, flags); } @@ -868,7 +877,7 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) unsigned int i; int err = 0; - if (!nic->ecdev) + if (!get_ecdev(nic)) spin_lock_irqsave(&nic->cmd_lock, flags); /* Previous command is accepted when SCB clears */ @@ -889,7 +898,7 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) iowrite8(cmd, &nic->csr->scb.cmd_lo); err_unlock: - if (!nic->ecdev) + if (!get_ecdev(nic)) spin_unlock_irqrestore(&nic->cmd_lock, flags); return err; @@ -902,7 +911,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, unsigned long flags; int err; - if (!nic->ecdev) { + if (!get_ecdev(nic)) { 
spin_lock_irqsave(&nic->cb_lock, flags); } @@ -950,7 +959,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, } err_unlock: - if (!nic->ecdev) { + if (!get_ecdev(nic)) { spin_unlock_irqrestore(&nic->cb_lock, flags); } @@ -984,7 +993,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) * manipulation of the MDI control registers is a multi-step * procedure it should be done under lock. */ - if (!nic->ecdev) + if (!nic->ecdev_) /* exemption of initialization check */ spin_lock_irqsave(&nic->mdio_lock, flags); for (i = 100; i; --i) { if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) @@ -993,7 +1002,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) } if (unlikely(!i)) { netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); - if (!nic->ecdev) + if (!get_ecdev(nic)) spin_unlock_irqrestore(&nic->mdio_lock, flags); return 0; /* No way to indicate timeout error */ } @@ -1004,7 +1013,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) break; } - if (!nic->ecdev) + if (!nic->ecdev_) spin_unlock_irqrestore(&nic->mdio_lock, flags); netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", @@ -1182,7 +1191,7 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) config->multicast_all = 0x1; /* 1=accept, 0=no */ /* disable WoL when up */ - if (nic->ecdev || + if (get_ecdev(nic) || (netif_running(nic->netdev) || !(nic->flags & wol_magic))) config->magic_packet_disable = 0x1; /* 1=off, 0=on */ @@ -1748,8 +1757,8 @@ static void e100_watchdog_impl(struct nic *nic) struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; u32 speed; - if (nic->ecdev) { - ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0); + if (get_ecdev(nic)) { + ecdev_set_link(get_ecdev(nic), mii_link_ok(&nic->mii) ? 1 : 0); return; } @@ -1862,14 +1871,14 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, /* We queued the skb, but now we're out of space. */ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, "No space for CB\n"); - if (!nic->ecdev) + if (!get_ecdev(nic)) netif_stop_queue(netdev); break; case -ENOMEM: /* This is a hard error - log it. 
*/ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, "Out of Tx resources, returning skb\n"); - if (!nic->ecdev) + if (!get_ecdev(nic)) netif_stop_queue(netdev); return NETDEV_TX_BUSY; } @@ -1883,7 +1892,7 @@ static int e100_tx_clean(struct nic *nic) struct cb *cb; int tx_cleaned = 0; - if (!nic->ecdev) + if (!get_ecdev(nic)) spin_lock(&nic->cb_lock); /* Clean CBs marked complete */ @@ -1904,7 +1913,7 @@ static int e100_tx_clean(struct nic *nic) le32_to_cpu(cb->u.tcb.tbd.buf_addr), le16_to_cpu(cb->u.tcb.tbd.size), DMA_TO_DEVICE); - if (!nic->ecdev) + if (!get_ecdev(nic)) dev_kfree_skb_any(cb->skb); cb->skb = NULL; tx_cleaned = 1; @@ -1913,7 +1922,7 @@ static int e100_tx_clean(struct nic *nic) nic->cbs_avail++; } - if (!nic->ecdev) { + if (!get_ecdev(nic)) { spin_unlock(&nic->cb_lock); /* Recover from running out of Tx resources in xmit_frame */ @@ -1934,7 +1943,7 @@ static void e100_clean_cbs(struct nic *nic) le32_to_cpu(cb->u.tcb.tbd.buf_addr), le16_to_cpu(cb->u.tcb.tbd.size), DMA_TO_DEVICE); - if (!nic->ecdev) + if (!get_ecdev(nic)) dev_kfree_skb(cb->skb); } nic->cb_to_clean = nic->cb_to_clean->next; @@ -2089,7 +2098,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, nic->ru_running = RU_SUSPENDED; } - if (!nic->ecdev) { + if (!get_ecdev(nic)) { /* Pull off the RFD and put the actual data (minus eth hdr) */ skb_reserve(skb, sizeof(struct rfd)); skb_put(skb, actual_size); @@ -2107,27 +2116,27 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, } if (unlikely(!(rfd_status & cb_ok))) { - if (!nic->ecdev) { + if (!get_ecdev(nic)) { /* Don't indicate if hardware indicates errors */ dev_kfree_skb_any(skb); } } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { /* Don't indicate oversized frames */ nic->rx_over_length_errors++; - if (!nic->ecdev) { + if (!get_ecdev(nic)) { dev_kfree_skb_any(skb); } } else { process_skb: dev->stats.rx_packets++; dev->stats.rx_bytes += (actual_size - fcs_pad); - if (nic->ecdev) { - ecdev_receive(nic->ecdev, + if (get_ecdev(nic)) { + ecdev_receive(get_ecdev(nic), skb->data + sizeof(struct rfd), actual_size - fcs_pad); // No need to detect link status as // long as frames are received: Reset watchdog. - if (ecdev_get_link(nic->ecdev)) { + if (ecdev_get_link(get_ecdev(nic))) { nic->ec_watchdog_jiffies = jiffies; } } else { @@ -2137,7 +2146,7 @@ process_skb: (*work_done)++; } - if (nic->ecdev) { + if (get_ecdev(nic)) { // make receive frame descriptior usable again memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd)); rx->dma_addr = dma_map_single(&nic->pdev->dev, skb->data, @@ -2191,7 +2200,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done, old_before_last_rx = nic->rx_to_use->prev->prev; old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; - if (!nic->ecdev) { + if (!get_ecdev(nic)) { /* Alloc new skbs to refill list */ for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { if(unlikely(e100_rx_alloc_skb(nic, rx))) @@ -2288,7 +2297,7 @@ static int e100_rx_alloc_list(struct nic *nic) } } - if (!nic->ecdev) { + if (!get_ecdev(nic)) { /* Set the el-bit on the buffer that is before the last buffer. * This lets us update the next pointer on the last buffer without * worrying about hardware touching it. 
@@ -2330,7 +2339,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id) if (stat_ack & stat_ack_rnr) nic->ru_running = RU_SUSPENDED; - if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) { + if (!get_ecdev(nic) && likely(napi_schedule_prep(&nic->napi))) { e100_disable_irq(nic); __napi_schedule(&nic->napi); } @@ -2418,13 +2427,13 @@ static int e100_up(struct nic *nic) goto err_clean_cbs; e100_set_multicast_list(nic->netdev); e100_start_receiver(nic, NULL); - if (!nic->ecdev) { + if (!get_ecdev(nic)) { mod_timer(&nic->watchdog, jiffies); } if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, nic->netdev->name, nic->netdev))) goto err_no_irq; - if (!nic->ecdev) { + if (!get_ecdev(nic)) { netif_wake_queue(nic->netdev); napi_enable(&nic->napi); /* enable ints _after_ enabling poll, preventing a race between @@ -2434,7 +2443,7 @@ static int e100_up(struct nic *nic) return 0; err_no_irq: - if (!nic->ecdev) + if (!get_ecdev(nic)) del_timer_sync(&nic->watchdog); err_clean_cbs: e100_clean_cbs(nic); @@ -2445,14 +2454,14 @@ err_rx_clean_list: static void e100_down(struct nic *nic) { - if (!nic->ecdev) { + if (!get_ecdev(nic)) { /* wait here for poll to complete */ napi_disable(&nic->napi); netif_stop_queue(nic->netdev); } e100_hw_reset(nic); free_irq(nic->pdev->irq, nic->netdev); - if (!nic->ecdev) { + if (!get_ecdev(nic)) { del_timer_sync(&nic->watchdog); netif_carrier_off(nic->netdev); } @@ -2929,7 +2938,7 @@ static int e100_open(struct net_device *netdev) struct nic *nic = netdev_priv(netdev); int err = 0; - if (!nic->ecdev) + if (!get_ecdev(nic)) netif_carrier_off(netdev); if ((err = e100_up(nic))) netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); @@ -3089,9 +3098,10 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_pme_active(pdev, false); // offer device to EtherCAT master module - nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + nic->ecdev_ = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + nic->ecdev_initialized = true; - if (!nic->ecdev) { + if (!get_ecdev(nic)) { strcpy(netdev->name, "eth%d"); if ((err = register_netdev(netdev))) { netif_err(nic, probe, nic->netdev, @@ -3115,10 +3125,10 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), pdev->irq, netdev->dev_addr); - if (nic->ecdev) { - err = ecdev_open(nic->ecdev); + if (get_ecdev(nic)) { + err = ecdev_open(get_ecdev(nic)); if (err) { - ecdev_withdraw(nic->ecdev); + ecdev_withdraw(get_ecdev(nic)); goto err_out_free; } } @@ -3146,9 +3156,9 @@ static void e100_remove(struct pci_dev *pdev) if (netdev) { struct nic *nic = netdev_priv(netdev); - if (nic->ecdev) { - ecdev_close(nic->ecdev); - ecdev_withdraw(nic->ecdev); + if (get_ecdev(nic)) { + ecdev_close(get_ecdev(nic)); + ecdev_withdraw(get_ecdev(nic)); } else { unregister_netdev(netdev); } @@ -3263,7 +3273,7 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); - if (nic->ecdev) + if (get_ecdev(nic)) return -EBUSY; netif_device_detach(netdev); @@ -3290,7 +3300,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); - if (nic->ecdev) + if (get_ecdev(nic)) return -EBUSY; if (pci_enable_device(pdev)) { @@ -3323,11 +3333,11 @@ static void e100_io_resume(struct pci_dev *pdev) /* ack any pending wake events, disable PME */ pci_enable_wake(pdev, PCI_D0, 0); - if (!nic->ecdev) + if (!get_ecdev(nic)) netif_device_attach(netdev); - if (nic->ecdev || netif_running(netdev)) { + if (get_ecdev(nic) || netif_running(netdev)) { e100_open(netdev); - if (!nic->ecdev) + if (!get_ecdev(nic)) mod_timer(&nic->watchdog, jiffies); } } diff --git a/devices/e1000/Kbuild.in b/devices/e1000/Kbuild.in index b3738dd8..1bf2fd9a 100644 --- a/devices/e1000/Kbuild.in +++ b/devices/e1000/Kbuild.in @@ -43,6 +43,10 @@ ifeq (@ENABLE_E1000@,1) obj-m += ec_e1000.o ec_e1000-objs := $(EC_E1000_OBJ) CFLAGS_e1000_main-@KERNEL_E1000@-ethercat.o = -DREV=$(REV) + + ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y := -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING + endif endif KBUILD_EXTRA_SYMBOLS := \ diff --git a/devices/e1000/e1000-5.14-ethercat.h b/devices/e1000/e1000-5.14-ethercat.h index 71aa37ad..f25cd253 100644 --- a/devices/e1000/e1000-5.14-ethercat.h +++ b/devices/e1000/e1000-5.14-ethercat.h @@ -303,11 +303,20 @@ struct e1000_adapter { struct delayed_work fifo_stall_task; struct delayed_work phy_info_task; - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct e1000_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + enum e1000_state_t { __E1000_TESTING, __E1000_RESETTING, diff --git a/devices/e1000/e1000-6.1-ethercat.h b/devices/e1000/e1000-6.1-ethercat.h index 0769b47c..9e986c25 100644 --- a/devices/e1000/e1000-6.1-ethercat.h +++ b/devices/e1000/e1000-6.1-ethercat.h @@ -303,11 +303,20 @@ struct e1000_adapter { struct delayed_work fifo_stall_task; struct delayed_work phy_info_task; - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct e1000_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + enum e1000_state_t { __E1000_TESTING, __E1000_RESETTING, diff --git a/devices/e1000/e1000_main-4.4-ethercat.c b/devices/e1000/e1000_main-4.4-ethercat.c index 18042c1f..3d6f0ccb 100644 --- 
a/devices/e1000/e1000_main-4.4-ethercat.c +++ b/devices/e1000/e1000_main-4.4-ethercat.c @@ -138,6 +138,7 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev); static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); void ec_poll(struct net_device *); +static void ec_kick_watchdog(struct irq_work *work); static irqreturn_t e1000_intr(int irq, void *data); static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring); @@ -527,9 +528,7 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) { set_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { - cancel_delayed_work_sync(&adapter->watchdog_task); - } + cancel_delayed_work_sync(&adapter->watchdog_task); /* * Since the watchdog task can reschedule other tasks, we should cancel @@ -1328,12 +1327,14 @@ static void e1000_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; bool disable_dev; + if (adapter->ecdev) + irq_work_sync(&adapter->ec_watchdog_kicker); + e1000_down_and_stop(adapter); e1000_release_manageability(adapter); if (adapter->ecdev) { ecdev_close(adapter->ecdev); - irq_work_sync(&adapter->ec_watchdog_kicker); ecdev_withdraw(adapter->ecdev); } else { unregister_netdev(netdev); @@ -3864,7 +3865,7 @@ static void ec_kick_watchdog(struct irq_work *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, ec_watchdog_kicker); - schedule_work(&adapter->watchdog_task); + schedule_delayed_work(&adapter->watchdog_task, 1); } void ec_poll(struct net_device *netdev) diff --git a/devices/e1000/e1000_main-5.10-ethercat.c b/devices/e1000/e1000_main-5.10-ethercat.c index 018f1168..7dd7b2db 100644 --- a/devices/e1000/e1000_main-5.10-ethercat.c +++ b/devices/e1000/e1000_main-5.10-ethercat.c @@ -108,6 +108,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); void ec_poll(struct net_device *); +static void ec_kick_watchdog(struct irq_work *work); static irqreturn_t e1000_intr(int irq, void *data); static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring); @@ -496,9 +497,7 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) { set_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { - cancel_delayed_work_sync(&adapter->watchdog_task); - } + cancel_delayed_work_sync(&adapter->watchdog_task); /* * Since the watchdog task can reschedule other tasks, we should cancel @@ -1309,12 +1308,14 @@ static void e1000_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; bool disable_dev; + if (adapter->ecdev) + irq_work_sync(&adapter->ec_watchdog_kicker); + e1000_down_and_stop(adapter); e1000_release_manageability(adapter); if (adapter->ecdev) { ecdev_close(adapter->ecdev); - irq_work_sync(&adapter->ec_watchdog_kicker); ecdev_withdraw(adapter->ecdev); } else { unregister_netdev(netdev); @@ -3842,7 +3843,7 @@ static void ec_kick_watchdog(struct irq_work *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, ec_watchdog_kicker); - schedule_work(&adapter->watchdog_task); + schedule_delayed_work(&adapter->watchdog_task, 1); } void ec_poll(struct net_device *netdev) diff --git a/devices/e1000/e1000_main-5.14-ethercat.c b/devices/e1000/e1000_main-5.14-ethercat.c index ad3e681b..128d353e 100644 --- a/devices/e1000/e1000_main-5.14-ethercat.c +++ 
b/devices/e1000/e1000_main-5.14-ethercat.c @@ -8,6 +8,17 @@ #include #include +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + char e1000_driver_name[] = "ec_e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -108,6 +119,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); void ec_poll(struct net_device *); +static void ec_kick_watchdog(struct irq_work *work); static irqreturn_t e1000_intr(int irq, void *data); static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring); @@ -259,7 +271,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) int irq_flags = IRQF_SHARED; int err; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return 0; } @@ -276,7 +288,7 @@ static void e1000_free_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -291,7 +303,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) { + if (!adapter->ecdev_) { return; } @@ -308,7 +320,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -395,7 +407,7 @@ static void e1000_configure(struct e1000_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { struct e1000_rx_ring *ring = &adapter->rx_ring[i]; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* fill rx ring completely! */ adapter->alloc_rx_buf(adapter, ring, ring->count); } else { @@ -415,7 +427,7 @@ int e1000_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_enable(&adapter->napi); e1000_irq_enable(adapter); @@ -496,9 +508,7 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) { set_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { - cancel_delayed_work_sync(&adapter->watchdog_task); - } + cancel_delayed_work_sync(&adapter->watchdog_task); /* * Since the watchdog task can reschedule other tasks, we should cancel @@ -506,13 +516,13 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) * still running after the adapter has been turned down. */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { cancel_delayed_work_sync(&adapter->phy_info_task); cancel_delayed_work_sync(&adapter->fifo_stall_task); } /* Only kill reset task if adapter is not resetting */ - if (!adapter->ecdev && !test_bit(__E1000_RESETTING, &adapter->flags)) + if (!get_ecdev(adapter) && !test_bit(__E1000_RESETTING, &adapter->flags)) cancel_work_sync(&adapter->reset_task); } @@ -527,7 +537,7 @@ void e1000_down(struct e1000_adapter *adapter) ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_disable(netdev); } @@ -547,7 +557,7 @@ void e1000_down(struct e1000_adapter *adapter) * in the hardware until the next IFF_DOWN+IFF_UP cycle. 
*/ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_carrier_off(netdev); napi_disable(&adapter->napi); @@ -1002,6 +1012,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); adapter->bars = bars; adapter->need_ioport = need_ioport; + adapter->ecdev_initialized = 0; hw = &adapter->hw; hw->back = adapter; @@ -1137,7 +1148,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e_err(probe, "EEPROM Read Error\n"); } /* don't block initialization here due to bad MAC address */ +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, hw->mac_addr); +#else memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); +#endif if (!is_valid_ether_addr(netdev->dev_addr)) e_err(probe, "Invalid MAC Address\n"); @@ -1232,12 +1247,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e1000_reset(adapter); // offer device to EtherCAT master module - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = 1; + if (get_ecdev(adapter)) { init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } } else { @@ -1259,7 +1275,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), netdev->dev_addr); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); } @@ -1309,13 +1325,15 @@ static void e1000_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; bool disable_dev; + if (get_ecdev(adapter)) + irq_work_sync(&adapter->ec_watchdog_kicker); + e1000_down_and_stop(adapter); e1000_release_manageability(adapter); - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); - irq_work_sync(&adapter->ec_watchdog_kicker); - ecdev_withdraw(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + ecdev_withdraw(get_ecdev(adapter)); } else { unregister_netdev(netdev); } @@ -1448,7 +1466,7 @@ int e1000_open(struct net_device *netdev) /* From here on the code is the same as e1000_up() */ clear_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_enable(&adapter->napi); e1000_irq_enable(adapter); @@ -1910,7 +1928,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rdlen, rctl, rxcsum; - if (!adapter->ecdev && adapter->netdev->mtu > ETH_DATA_LEN) { + if (!get_ecdev(adapter) && adapter->netdev->mtu > ETH_DATA_LEN) { rdlen = adapter->rx_ring[0].count * sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_jumbo_rx_irq; @@ -2010,7 +2028,7 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, struct e1000_tx_buffer *buffer_info) { - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -2220,7 +2238,7 @@ static void e1000_enter_82542_rst(struct e1000_adapter *adapter) E1000_WRITE_FLUSH(); mdelay(5); - if (!adapter->ecdev && netif_running(netdev)) + if (!get_ecdev(adapter) && netif_running(netdev)) e1000_clean_all_rx_rings(adapter); } @@ -2243,7 +2261,7 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter) /* 
No need to loop, because 82542 supports only 1 queue */ struct e1000_rx_ring *ring = &adapter->rx_ring[0]; e1000_configure_rx(adapter); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* fill rx ring completely! */ adapter->alloc_rx_buf(adapter, ring, ring->count); } else { @@ -2275,7 +2293,11 @@ static int e1000_set_mac(struct net_device *netdev, void *p) if (hw->mac_type == e1000_82542_rev2_0) e1000_enter_82542_rst(adapter); +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, addr->sa_data); +#else memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); +#endif memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); e1000_rar_set(hw, hw->mac_addr, 0); @@ -2499,12 +2521,12 @@ static void e1000_watchdog(struct work_struct *work) u32 link, tctl; link = e1000_has_link(adapter); - if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link) + if (!get_ecdev(adapter) && (netif_carrier_ok(netdev)) && link) goto link_up; if (link) { - if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) || - (!adapter->ecdev && !netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && !ecdev_get_link(get_ecdev(adapter))) || + (!get_ecdev(adapter) && !netif_carrier_ok(netdev))) { u32 ctrl; /* update snapshot of PHY registers on LSC */ e1000_get_speed_and_duplex(hw, @@ -2539,8 +2561,8 @@ static void e1000_watchdog(struct work_struct *work) tctl |= E1000_TCTL_EN; ew32(TCTL, tctl); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 1); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 1); } else { netif_carrier_on(netdev); @@ -2551,15 +2573,15 @@ static void e1000_watchdog(struct work_struct *work) adapter->smartspeed = 0; } } else { - if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) - || (!adapter->ecdev && netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && ecdev_get_link(get_ecdev(adapter))) + || (!get_ecdev(adapter) && netif_carrier_ok(netdev))) { adapter->link_speed = 0; adapter->link_duplex = 0; pr_info("%s NIC Link is Down\n", netdev->name); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -2587,7 +2609,7 @@ link_up: e1000_update_adaptive(hw); - if (!adapter->ecdev && !netif_carrier_ok(netdev)) { + if (!get_ecdev(adapter) && !netif_carrier_ok(netdev)) { if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going @@ -2623,7 +2645,7 @@ link_up: adapter->detect_tx_hung = true; /* Reschedule the task */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); } @@ -3144,7 +3166,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_tx_ring *tx_ring = adapter->tx_ring; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -3240,7 +3262,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (!__pskb_pull_tail(skb, pull_size)) { e_err(drv, "__pskb_pull_tail " "failed.\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } return NETDEV_TX_OK; @@ -3291,7 +3313,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (unlikely((hw->mac_type == e1000_82547) && (e1000_82547_fifo_workaround(adapter, skb)))) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { 
netif_stop_queue(netdev); if (!test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->fifo_stall_task, 1); @@ -3309,7 +3331,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tso = e1000_tso(adapter, tx_ring, skb, protocol); if (tso < 0) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } return NETDEV_TX_OK; @@ -3341,7 +3363,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, */ int desc_needed = MAX_SKB_FRAGS + 7; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_sent_queue(netdev, skb->len); skb_tx_timestamp(skb); } @@ -3358,12 +3380,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); - if (adapter->ecdev || !netdev_xmit_more() || + if (get_ecdev(adapter) || !netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); } } else { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } tx_ring->buffer_info[first].time_stamp = 0; @@ -3614,7 +3636,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) struct e1000_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -3700,7 +3722,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) if (pci_channel_offline(pdev)) return; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { spin_lock_irqsave(&adapter->stats_lock, flags); } @@ -3831,7 +3853,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { spin_unlock_irqrestore(&adapter->stats_lock, flags); } } @@ -3841,7 +3863,7 @@ static void ec_kick_watchdog(struct irq_work *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, ec_watchdog_kicker); - schedule_work(&adapter->watchdog_task); + schedule_delayed_work(&adapter->watchdog_task, 1); } void ec_poll(struct net_device *netdev) @@ -3880,11 +3902,11 @@ static irqreturn_t e1000_intr(int irq, void *data) if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { hw->get_link_status = 1; /* guard against interrupt when we're going down */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->watchdog_task, 1); } - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int i, ec_work_done = 0; for (i = 0; i < E1000_MAX_INTR; i++) { if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring, @@ -4002,12 +4024,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, */ smp_store_release(&tx_ring->next_to_clean, i); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_completed_queue(netdev, pkts_compl, bytes_compl); } #define TX_WAKE_THRESHOLD 32 - if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) && + if (!get_ecdev(adapter) && unlikely(count && netif_carrier_ok(netdev) && E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
@@ -4021,7 +4043,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, } } - if (!adapter->ecdev && adapter->detect_tx_hung) { + if (!get_ecdev(adapter) && adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ @@ -4509,7 +4531,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, length = le16_to_cpu(rx_desc->length); data = buffer_info->rxbuf.data; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { prefetch(data); skb = e1000_copybreak(adapter, buffer_info, length, data); if (!skb) { @@ -4553,7 +4575,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (adapter->discarding) { /* All receives must fit into a single buffer */ netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb(skb); } if (status & E1000_RXD_STAT_EOP) @@ -4569,7 +4591,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, } else if (netdev->features & NETIF_F_RXALL) { goto process_skb; } else { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb(skb); } goto next_desc; @@ -4586,7 +4608,7 @@ process_skb: */ length -= 4; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (buffer_info->rxbuf.data == NULL) skb_put(skb, length); else /* copybreak skb */ @@ -4599,10 +4621,10 @@ process_skb: le16_to_cpu(rx_desc->csum), skb); } - if (adapter->ecdev) { + if (get_ecdev(adapter)) { dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); - ecdev_receive(adapter->ecdev, data, length); + ecdev_receive(get_ecdev(adapter), data, length); // No need to detect link status as // long as frames are received: Reset watchdog. 
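The driver changes in this patch all follow the same pattern: the raw ecdev pointer is renamed to ecdev_ and is only read through a small get_ecdev() accessor, which can flag reads that happen before ecdev_offer() has run and ecdev_initialized has been set. Below is a minimal, self-contained sketch of that accessor pattern; struct nic_sketch is illustrative only (the real code uses struct nic, struct rtl8139_private, struct e1000_adapter, ...), and the check is compiled in only when EC_ENABLE_DRIVER_RESOURCE_VERIFYING is defined, which --enable-driver-resource-verifying adds to ccflags-y.

    #include <linux/bug.h>      /* WARN_ON() */
    #include <linux/types.h>    /* bool */

    /* Illustrative stand-in for the per-device private structs above. */
    struct nic_sketch {
            void *ecdev_;            /* ec_device_t * in the real drivers */
            bool ecdev_initialized;  /* set right after ecdev_offer() */
    };

    static inline void *get_ecdev(struct nic_sketch *adapter)
    {
    #ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING
            /* Warn if the pointer is read before probe has offered the
             * device to the EtherCAT master module. */
            WARN_ON(!adapter->ecdev_initialized);
    #endif
            return adapter->ecdev_;
    }

Keeping the WARN_ON() behind the configure switch means normal builds pay no extra cost, while verification builds catch code paths that read the device pointer before probe has initialized it.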
@@ -4921,7 +4943,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, data->phy_id = hw->phy_addr; break; case SIOCGMIIREG: - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EPERM; } spin_lock_irqsave(&adapter->stats_lock, flags); @@ -4933,7 +4955,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, spin_unlock_irqrestore(&adapter->stats_lock, flags); break; case SIOCSMIIREG: - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EPERM; } if (data->reg_num & ~(0x1F)) @@ -5223,7 +5245,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) u32 ctrl, ctrl_ext, rctl, status; u32 wufc = adapter->wol; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -5319,7 +5341,7 @@ static int __maybe_unused e1000_resume(struct device *dev) struct e1000_hw *hw = &adapter->hw; u32 err; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -5355,7 +5377,7 @@ static int __maybe_unused e1000_resume(struct device *dev) if (netif_running(netdev)) e1000_up(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_device_attach(netdev); } diff --git a/devices/e1000/e1000_main-5.15-ethercat.c b/devices/e1000/e1000_main-5.15-ethercat.c index 082150e9..fb18874a 100644 --- a/devices/e1000/e1000_main-5.15-ethercat.c +++ b/devices/e1000/e1000_main-5.15-ethercat.c @@ -108,6 +108,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); void ec_poll(struct net_device *); +static void ec_kick_watchdog(struct irq_work *work); static irqreturn_t e1000_intr(int irq, void *data); static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring); @@ -496,9 +497,7 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) { set_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { - cancel_delayed_work_sync(&adapter->watchdog_task); - } + cancel_delayed_work_sync(&adapter->watchdog_task); /* * Since the watchdog task can reschedule other tasks, we should cancel @@ -1309,12 +1308,14 @@ static void e1000_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; bool disable_dev; + if (adapter->ecdev) + irq_work_sync(&adapter->ec_watchdog_kicker); + e1000_down_and_stop(adapter); e1000_release_manageability(adapter); if (adapter->ecdev) { ecdev_close(adapter->ecdev); - irq_work_sync(&adapter->ec_watchdog_kicker); ecdev_withdraw(adapter->ecdev); } else { unregister_netdev(netdev); @@ -3841,7 +3842,7 @@ static void ec_kick_watchdog(struct irq_work *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, ec_watchdog_kicker); - schedule_work(&adapter->watchdog_task); + schedule_delayed_work(&adapter->watchdog_task, 1); } void ec_poll(struct net_device *netdev) diff --git a/devices/e1000/e1000_main-6.1-ethercat.c b/devices/e1000/e1000_main-6.1-ethercat.c index 1898e511..8d8518ac 100644 --- a/devices/e1000/e1000_main-6.1-ethercat.c +++ b/devices/e1000/e1000_main-6.1-ethercat.c @@ -108,6 +108,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); void ec_poll(struct net_device *); +static void ec_kick_watchdog(struct irq_work *work); static irqreturn_t e1000_intr(int irq, void *data); static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring 
*tx_ring); @@ -259,7 +260,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) int irq_flags = IRQF_SHARED; int err; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return 0; } @@ -276,7 +277,7 @@ static void e1000_free_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -291,7 +292,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) { + if (adapter->ecdev_) { return; } @@ -308,7 +309,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -395,7 +396,7 @@ static void e1000_configure(struct e1000_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { struct e1000_rx_ring *ring = &adapter->rx_ring[i]; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* fill rx ring completely! */ adapter->alloc_rx_buf(adapter, ring, ring->count); } else { @@ -415,7 +416,7 @@ int e1000_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_enable(&adapter->napi); e1000_irq_enable(adapter); @@ -496,9 +497,7 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) { set_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { - cancel_delayed_work_sync(&adapter->watchdog_task); - } + cancel_delayed_work_sync(&adapter->watchdog_task); /* * Since the watchdog task can reschedule other tasks, we should cancel @@ -506,13 +505,13 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) * still running after the adapter has been turned down. */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { cancel_delayed_work_sync(&adapter->phy_info_task); cancel_delayed_work_sync(&adapter->fifo_stall_task); } /* Only kill reset task if adapter is not resetting */ - if (!adapter->ecdev && !test_bit(__E1000_RESETTING, &adapter->flags)) + if (!get_ecdev(adapter) && !test_bit(__E1000_RESETTING, &adapter->flags)) cancel_work_sync(&adapter->reset_task); } @@ -527,7 +526,7 @@ void e1000_down(struct e1000_adapter *adapter) ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_disable(netdev); } @@ -547,7 +546,7 @@ void e1000_down(struct e1000_adapter *adapter) * in the hardware until the next IFF_DOWN+IFF_UP cycle. 
*/ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_carrier_off(netdev); napi_disable(&adapter->napi); @@ -1002,6 +1001,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); adapter->bars = bars; adapter->need_ioport = need_ioport; + adapter->ecdev_initialized = 0; hw = &adapter->hw; hw->back = adapter; @@ -1232,12 +1232,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e1000_reset(adapter); // offer device to EtherCAT master module - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = 1; + if (get_ecdev(adapter)) { init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } } else { @@ -1259,7 +1260,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), netdev->dev_addr); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); } @@ -1309,13 +1310,15 @@ static void e1000_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; bool disable_dev; + if (get_ecdev(adapter)) + irq_work_sync(&adapter->ec_watchdog_kicker); + e1000_down_and_stop(adapter); e1000_release_manageability(adapter); - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); - irq_work_sync(&adapter->ec_watchdog_kicker); - ecdev_withdraw(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + ecdev_withdraw(get_ecdev(adapter)); } else { unregister_netdev(netdev); } @@ -1448,7 +1451,7 @@ int e1000_open(struct net_device *netdev) /* From here on the code is the same as e1000_up() */ clear_bit(__E1000_DOWN, &adapter->flags); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_enable(&adapter->napi); e1000_irq_enable(adapter); @@ -1910,7 +1913,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rdlen, rctl, rxcsum; - if (!adapter->ecdev && adapter->netdev->mtu > ETH_DATA_LEN) { + if (!get_ecdev(adapter) && adapter->netdev->mtu > ETH_DATA_LEN) { rdlen = adapter->rx_ring[0].count * sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_jumbo_rx_irq; @@ -2011,7 +2014,7 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, struct e1000_tx_buffer *buffer_info, int budget) { - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -2221,7 +2224,7 @@ static void e1000_enter_82542_rst(struct e1000_adapter *adapter) E1000_WRITE_FLUSH(); mdelay(5); - if (!adapter->ecdev && netif_running(netdev)) + if (!get_ecdev(adapter) && netif_running(netdev)) e1000_clean_all_rx_rings(adapter); } @@ -2244,7 +2247,7 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter) /* No need to loop, because 82542 supports only 1 queue */ struct e1000_rx_ring *ring = &adapter->rx_ring[0]; e1000_configure_rx(adapter); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* fill rx ring completely! 
*/ adapter->alloc_rx_buf(adapter, ring, ring->count); } else { @@ -2500,12 +2503,12 @@ static void e1000_watchdog(struct work_struct *work) u32 link, tctl; link = e1000_has_link(adapter); - if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link) + if (!get_ecdev(adapter) && (netif_carrier_ok(netdev)) && link) goto link_up; if (link) { - if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) || - (!adapter->ecdev && !netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && !ecdev_get_link(get_ecdev(adapter))) || + (!get_ecdev(adapter) && !netif_carrier_ok(netdev))) { u32 ctrl; /* update snapshot of PHY registers on LSC */ e1000_get_speed_and_duplex(hw, @@ -2540,8 +2543,8 @@ static void e1000_watchdog(struct work_struct *work) tctl |= E1000_TCTL_EN; ew32(TCTL, tctl); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 1); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 1); } else { netif_carrier_on(netdev); @@ -2552,15 +2555,15 @@ static void e1000_watchdog(struct work_struct *work) adapter->smartspeed = 0; } } else { - if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) - || (!adapter->ecdev && netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && ecdev_get_link(get_ecdev(adapter))) + || (!get_ecdev(adapter) && netif_carrier_ok(netdev))) { adapter->link_speed = 0; adapter->link_duplex = 0; pr_info("%s NIC Link is Down\n", netdev->name); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -2588,7 +2591,7 @@ link_up: e1000_update_adaptive(hw); - if (!adapter->ecdev && !netif_carrier_ok(netdev)) { + if (!get_ecdev(adapter) && !netif_carrier_ok(netdev)) { if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going @@ -2624,7 +2627,7 @@ link_up: adapter->detect_tx_hung = true; /* Reschedule the task */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); } @@ -3145,7 +3148,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_tx_ring *tx_ring = adapter->tx_ring; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -3241,7 +3244,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (!__pskb_pull_tail(skb, pull_size)) { e_err(drv, "__pskb_pull_tail " "failed.\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } return NETDEV_TX_OK; @@ -3292,7 +3295,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (unlikely((hw->mac_type == e1000_82547) && (e1000_82547_fifo_workaround(adapter, skb)))) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_stop_queue(netdev); if (!test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->fifo_stall_task, 1); @@ -3310,7 +3313,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tso = e1000_tso(adapter, tx_ring, skb, protocol); if (tso < 0) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } return NETDEV_TX_OK; @@ -3342,7 +3345,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, */ int desc_needed = MAX_SKB_FRAGS + 7; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_sent_queue(netdev, skb->len); skb_tx_timestamp(skb); } @@ -3359,12 +3362,12 @@ static 
netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); - if (adapter->ecdev || !netdev_xmit_more() || + if (get_ecdev(adapter) || !netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); } } else { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } tx_ring->buffer_info[first].time_stamp = 0; @@ -3615,7 +3618,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) struct e1000_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -3701,7 +3704,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) if (pci_channel_offline(pdev)) return; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { spin_lock_irqsave(&adapter->stats_lock, flags); } @@ -3832,7 +3835,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { spin_unlock_irqrestore(&adapter->stats_lock, flags); } } @@ -3842,7 +3845,7 @@ static void ec_kick_watchdog(struct irq_work *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, ec_watchdog_kicker); - schedule_work(&adapter->watchdog_task); + schedule_delayed_work(&adapter->watchdog_task, 1); } void ec_poll(struct net_device *netdev) @@ -3881,11 +3884,11 @@ static irqreturn_t e1000_intr(int irq, void *data) if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { hw->get_link_status = 1; /* guard against interrupt when we're going down */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->watchdog_task, 1); } - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int i, ec_work_done = 0; for (i = 0; i < E1000_MAX_INTR; i++) { if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring, @@ -4004,12 +4007,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, */ smp_store_release(&tx_ring->next_to_clean, i); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_completed_queue(netdev, pkts_compl, bytes_compl); } #define TX_WAKE_THRESHOLD 32 - if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) && + if (!get_ecdev(adapter) && unlikely(count && netif_carrier_ok(netdev) && E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
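Note on the e1000_xmit_frame() hunks above: once the port has been claimed as an EtherCAT device, the master owns the frame buffer and the kernel's queueing/BQL machinery is bypassed, so the driver must neither free the skb nor account it via netdev_sent_queue(), and it writes the TDT doorbell unconditionally instead of batching on netdev_xmit_more(). A condensed sketch of that tail end of the transmit path (illustration only, not code from the patch; error paths elided):

/* Condensed from the e1000_xmit_frame() hunks above (illustration only). */
static void ec_tx_finish_sketch(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring,
				struct sk_buff *skb, int tx_flags, int count)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	if (!get_ecdev(adapter)) {
		netdev_sent_queue(netdev, skb->len);	/* BQL: stack mode only */
		skb_tx_timestamp(skb);
	}
	e1000_tx_queue(adapter, tx_ring, tx_flags, count);

	/* EtherCAT frames are cycle-critical: ring the tail register right
	 * away instead of waiting for netdev_xmit_more(). */
	if (get_ecdev(adapter) || !netdev_xmit_more() ||
	    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0)))
		writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
}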
@@ -4023,7 +4026,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, } } - if (!adapter->ecdev && adapter->detect_tx_hung) { + if (!get_ecdev(adapter) && adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ @@ -4511,7 +4514,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, length = le16_to_cpu(rx_desc->length); data = buffer_info->rxbuf.data; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { prefetch(data); skb = e1000_copybreak(adapter, buffer_info, length, data); if (!skb) { @@ -4555,7 +4558,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (adapter->discarding) { /* All receives must fit into a single buffer */ netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb(skb); } if (status & E1000_RXD_STAT_EOP) @@ -4571,7 +4574,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, } else if (netdev->features & NETIF_F_RXALL) { goto process_skb; } else { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb(skb); } goto next_desc; @@ -4588,7 +4591,7 @@ process_skb: */ length -= 4; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (buffer_info->rxbuf.data == NULL) skb_put(skb, length); else /* copybreak skb */ @@ -4601,10 +4604,10 @@ process_skb: le16_to_cpu(rx_desc->csum), skb); } - if (adapter->ecdev) { + if (get_ecdev(adapter)) { dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); - ecdev_receive(adapter->ecdev, data, length); + ecdev_receive(get_ecdev(adapter), data, length); // No need to detect link status as // long as frames are received: Reset watchdog. 
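Note on the ec_kick_watchdog()/ec_poll() hunks above: watchdog_task is a delayed_work, so it has to be (re)armed through the delayed-work API; switching to schedule_delayed_work(&adapter->watchdog_task, 1) matches how the rest of the driver schedules it, and the 1-jiffy delay still runs it at the next tick. A minimal sketch of the kick path, assuming ec_poll() raises the irq_work from the master's cyclic context (the real trigger condition and the ring processing are elided, and ec_watchdog_jiffies is assumed here by analogy with the e1000e adapter fields shown further below):

static void ec_kick_watchdog(struct irq_work *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, ec_watchdog_kicker);

	/* irq_work handlers run in interrupt context, where queueing a
	 * delayed work is allowed. */
	schedule_delayed_work(&adapter->watchdog_task, 1);
}

void ec_poll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* ec_poll() may run in a restricted (hard real-time) context, so the
	 * watchdog work is not armed directly; irq_work_queue() is safe from
	 * any context and defers the call above to interrupt context. */
	if (time_after(jiffies, adapter->ec_watchdog_jiffies + 2 * HZ)) {
		adapter->ec_watchdog_jiffies = jiffies;
		irq_work_queue(&adapter->ec_watchdog_kicker);
	}

	/* ... RX/TX ring processing elided ... */
}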
@@ -4923,7 +4926,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, data->phy_id = hw->phy_addr; break; case SIOCGMIIREG: - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EPERM; } spin_lock_irqsave(&adapter->stats_lock, flags); @@ -4935,7 +4938,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, spin_unlock_irqrestore(&adapter->stats_lock, flags); break; case SIOCSMIIREG: - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EPERM; } if (data->reg_num & ~(0x1F)) @@ -5225,7 +5228,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) u32 ctrl, ctrl_ext, rctl, status; u32 wufc = adapter->wol; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -5321,7 +5324,7 @@ static int __maybe_unused e1000_resume(struct device *dev) struct e1000_hw *hw = &adapter->hw; u32 err; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return -EBUSY; } @@ -5357,7 +5360,7 @@ static int __maybe_unused e1000_resume(struct device *dev) if (netif_running(netdev)) e1000_up(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_device_attach(netdev); } diff --git a/devices/e1000e/Kbuild.in b/devices/e1000e/Kbuild.in index a578d296..7712067c 100644 --- a/devices/e1000e/Kbuild.in +++ b/devices/e1000e/Kbuild.in @@ -70,6 +70,10 @@ ifeq (@ENABLE_E1000E@,1) ec_e1000e-objs := $(EC_E1000E_OBJ) CFLAGS_netdev-@KERNEL_E1000E@-ethercat.o = -DREV=$(REV) + + ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y := -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING + endif endif KBUILD_EXTRA_SYMBOLS := \ diff --git a/devices/e1000e/e1000-5.14-ethercat.h b/devices/e1000e/e1000-5.14-ethercat.h index 2ab0fff1..5e16a3de 100644 --- a/devices/e1000e/e1000-5.14-ethercat.h +++ b/devices/e1000e/e1000-5.14-ethercat.h @@ -336,11 +336,20 @@ struct e1000_adapter { u16 eee_advert; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct e1000_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + struct e1000_info { enum e1000_mac_type mac; unsigned int flags; diff --git a/devices/e1000e/e1000-6.1-ethercat.h b/devices/e1000e/e1000-6.1-ethercat.h index 0210c7f5..eb2a1f03 100644 --- a/devices/e1000e/e1000-6.1-ethercat.h +++ b/devices/e1000e/e1000-6.1-ethercat.h @@ -338,11 +338,20 @@ struct e1000_adapter { u16 eee_advert; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct e1000_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + struct e1000_info { enum e1000_mac_type mac; unsigned int flags; diff --git a/devices/e1000e/netdev-5.14-ethercat.c b/devices/e1000e/netdev-5.14-ethercat.c index f510fcb8..10a40dc9 100644 --- a/devices/e1000e/netdev-5.14-ethercat.c +++ b/devices/e1000e/netdev-5.14-ethercat.c @@ -29,12 +29,23 @@ #include "e1000-5.14-ethercat.h" +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + char e1000e_driver_name[] = "ec_e1000e"; static inline int check_arbiter_wa_flag(const struct e1000_adapter *adapter) 
{ #ifdef EC_DISABLE_E1000E_WORKAROUND - return !adapter->ecdev && (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA); + return !get_ecdev(adapter) && (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA); #else return adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA; #endif @@ -948,7 +959,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb = buffer_info->skb; - if (!adapter->ecdev) + if (!get_ecdev(adapter)) buffer_info->skb = NULL; prefetch(skb->data - NET_IP_ALIGN); @@ -988,7 +999,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, goto next_desc; } - if (unlikely(!adapter->ecdev && + if (unlikely(!get_ecdev(adapter) && (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL))) { /* recycle */ @@ -1015,7 +1026,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, * performance for small packets with large amounts * of reassembly being done in the stack */ - if (!adapter->ecdev && length < copybreak) { + if (!get_ecdev(adapter) && length < copybreak) { struct sk_buff *new_skb = napi_alloc_skb(&adapter->napi, length); if (new_skb) { @@ -1039,8 +1050,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); - if (adapter->ecdev) { - ecdev_receive(adapter->ecdev, skb->data, length); + if (get_ecdev(adapter)) { + ecdev_receive(get_ecdev(adapter), skb->data, length); adapter->ec_watchdog_jiffies = jiffies; } else { e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1090,7 +1101,7 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring, buffer_info->dma = 0; } if (buffer_info->skb) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (drop) dev_kfree_skb_any(buffer_info->skb); else @@ -1142,8 +1153,8 @@ static void e1000_print_hw_hang(struct work_struct *work) } /* Real hang detected */ - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_stop_queue(netdev); @@ -1285,12 +1296,12 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) tx_ring->next_to_clean = i; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_completed_queue(netdev, pkts_compl, bytes_compl); } #define TX_WAKE_THRESHOLD 32 - if (!adapter->ecdev && count && netif_carrier_ok(netdev) && + if (!get_ecdev(adapter) && count && netif_carrier_ok(netdev) && e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
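Note on the verification plumbing introduced above: the @ENABLE_DRIVER_RESOURCE_VERIFYING@ conditional in Kbuild.in only defines EC_ENABLE_DRIVER_RESOURCE_VERIFYING, which arms the WARN_ON() inside the new get_ecdev() accessor; together with the ecdev -> ecdev_ rename and the ecdev_initialized flag, any path that queries the EtherCAT device pointer before probe has offered the device now produces a visible warning instead of silently taking the non-EtherCAT branch on a still-NULL pointer. A condensed illustration of the ordering being checked (based on the probe hunks further below; not the complete probe path):

/* Illustration only - the real code lives in e1000_probe(). */
static int ec_offer_sketch(struct e1000_adapter *adapter,
			   struct net_device *netdev)
{
	int err = 0;

	adapter->ecdev_initialized = 0;	/* early in probe: accessor not valid */

	/* ... hardware bring-up; a get_ecdev() call here would WARN ... */

	adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE);
	adapter->ecdev_initialized = 1;	/* accessor is meaningful from now on */

	if (get_ecdev(adapter)) {
		init_irq_work(&adapter->watchdog_kicker, ec_watchdog_kicker);
		err = ecdev_open(get_ecdev(adapter));
		if (err)
			ecdev_withdraw(get_ecdev(adapter));
	}
	return err;
}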
@@ -1304,7 +1315,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) } } - if (!adapter->ecdev && adapter->detect_tx_hung) { + if (!get_ecdev(adapter) && adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ @@ -1383,7 +1394,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, if (adapter->flags2 & FLAG2_IS_DISCARDING) { e_dbg("Packet Split buffers didn't pick up the full packet\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_irq(skb); } if (staterr & E1000_RXD_STAT_EOP) @@ -1393,7 +1404,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL))) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_irq(skb); } goto next_desc; @@ -1403,7 +1414,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, if (!length) { e_dbg("Last part of the packet spanning multiple descriptors\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_irq(skb); } goto next_desc; @@ -1492,8 +1503,8 @@ copydone: cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) adapter->rx_hdr_split++; - if (adapter->ecdev) { - ecdev_receive(adapter->ecdev, skb->data, length); + if (get_ecdev(adapter)) { + ecdev_receive(get_ecdev(adapter), skb->data, length); adapter->ec_watchdog_jiffies = jiffies; } else { e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1502,7 +1513,7 @@ copydone: next_desc: rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); - if (!adapter->ecdev) buffer_info->skb = NULL; + if (!get_ecdev(adapter)) buffer_info->skb = NULL; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) { @@ -1576,7 +1587,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb = buffer_info->skb; - if (!adapter->ecdev) + if (!get_ecdev(adapter)) buffer_info->skb = NULL; ++i; @@ -1602,7 +1613,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, /* recycle both page and skb */ buffer_info->skb = skb; /* an error means any chain goes out the window too */ - if (!adapter->ecdev && rx_ring->rx_skb_top) + if (!get_ecdev(adapter) && rx_ring->rx_skb_top) dev_kfree_skb_irq(rx_ring->rx_skb_top); rx_ring->rx_skb_top = NULL; goto next_desc; @@ -1675,14 +1686,14 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, total_rx_packets++; /* eth type trans needs skb->data to point to something */ - if (!adapter->ecdev && !pskb_may_pull(skb, ETH_HLEN)) { + if (!get_ecdev(adapter) && !pskb_may_pull(skb, ETH_HLEN)) { e_err("pskb_may_pull failed.\n"); dev_kfree_skb_irq(skb); goto next_desc; } - if (adapter->ecdev) { - ecdev_receive(adapter->ecdev, skb->data, length); + if (get_ecdev(adapter)) { + ecdev_receive(get_ecdev(adapter), skb->data, length); adapter->ec_watchdog_jiffies = jiffies; } else { e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1807,7 +1818,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int ec_work_done = 0; adapter->clean_rx(adapter->rx_ring, &ec_work_done, 100); e1000_clean_tx_irq(adapter->tx_ring); @@ -1881,7 +1892,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) struct e1000_hw *hw = 
&adapter->hw; u32 rctl, icr = er32(ICR); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int ec_work_done = 0; adapter->clean_rx(adapter->rx_ring, &ec_work_done, 100); e1000_clean_tx_irq(adapter->tx_ring); @@ -1969,7 +1980,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) if (icr & E1000_ICR_LSC) { hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->state)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } @@ -2016,7 +2027,7 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) rx_ring->set_itr = 0; } - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int ec_work_done = 0; adapter->clean_rx(adapter->rx_ring, &ec_work_done, 100); } else { @@ -2230,7 +2241,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; int err; - if (adapter->ecdev) + if (get_ecdev(adapter)) return 0; if (adapter->msix_entries) { @@ -2265,7 +2276,7 @@ static void e1000_free_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -2299,7 +2310,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) ew32(EIAC_82574, 0); e1e_flush(); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -2321,7 +2332,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->msix_entries) { @@ -3848,7 +3859,7 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000e_setup_rss_hash(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { adapter->alloc_rx_buf(rx_ring, adapter->rx_ring->count, GFP_KERNEL); } else { adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), @@ -4290,7 +4301,7 @@ void e1000e_reset(struct e1000_adapter *adapter) /** * e1000e_trigger_lsc - trigger an LSC interrupt - * @adapter: + * @adapter: * * Fire a link status change interrupt to start the watchdog. 
**/ @@ -4311,7 +4322,7 @@ void e1000e_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (adapter->msix_entries) e1000_configure_msix(adapter); e1000_irq_enable(adapter); @@ -4364,8 +4375,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) */ set_bit(__E1000_DOWN, &adapter->state); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -4377,7 +4388,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ - if (!adapter->ecdev) + if (!get_ecdev(adapter)) netif_stop_queue(netdev); /* disable transmits in the hardware */ @@ -4389,7 +4400,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) e1e_flush(); usleep_range(10000, 11000); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { e1000_irq_disable(adapter); napi_synchronize(&adapter->napi); @@ -4722,8 +4733,8 @@ int e1000e_open(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); netif_stop_queue(netdev); @@ -4771,7 +4782,7 @@ int e1000e_open(struct net_device *netdev) * ignore e1000e MSI messages, which means we need to test our MSI * interrupt now */ - if (!adapter->ecdev && adapter->int_mode != E1000E_INT_MODE_LEGACY) { + if (!get_ecdev(adapter) && adapter->int_mode != E1000E_INT_MODE_LEGACY) { err = e1000_test_msi(adapter); if (err) { e_err("Interrupt allocation failed\n"); @@ -4782,7 +4793,7 @@ int e1000e_open(struct net_device *netdev) /* From here on the code is the same as e1000e_up() */ clear_bit(__E1000_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_enable(&adapter->napi); e1000_irq_enable(adapter); @@ -4843,7 +4854,7 @@ int e1000e_close(struct net_device *netdev) netdev_info(netdev, "NIC Link is Down\n"); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_disable(&adapter->napi); } @@ -4887,7 +4898,11 @@ static int e1000_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, addr->sa_data); +#else memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); +#endif memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); @@ -5314,9 +5329,9 @@ static void e1000_watchdog_task(struct work_struct *work) return; link = e1000e_has_link(adapter); - if ((adapter->ecdev && (ecdev_get_link(adapter->ecdev)) && link) - || (!adapter->ecdev && (netif_carrier_ok(netdev)) && link)) { - if (!adapter->ecdev) { + if ((get_ecdev(adapter) && (ecdev_get_link(get_ecdev(adapter))) && link) + || (!get_ecdev(adapter) && (netif_carrier_ok(netdev)) && link)) { + if (!get_ecdev(adapter)) { /* Cancel scheduled suspend requests. 
*/ pm_runtime_resume(netdev->dev.parent); } @@ -5330,8 +5345,8 @@ static void e1000_watchdog_task(struct work_struct *work) e1000_update_mng_vlan(adapter); if (link) { - if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) - || (!adapter->ecdev && !netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && !ecdev_get_link(get_ecdev(adapter))) + || (!get_ecdev(adapter) && !netif_carrier_ok(netdev))) { bool txb2b = true; /* Cancel scheduled suspend requests. */ @@ -5447,27 +5462,27 @@ static void e1000_watchdog_task(struct work_struct *work) if (phy->ops.cfg_on_link_up) phy->ops.cfg_on_link_up(hw); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 1); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 1); } else { netif_wake_queue(netdev); netif_carrier_on(netdev); } - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->state)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); } } else { - if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) - || (!adapter->ecdev && netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && ecdev_get_link(get_ecdev(adapter))) + || (!get_ecdev(adapter) && netif_carrier_ok(netdev))) { adapter->link_speed = 0; adapter->link_duplex = 0; /* Link status message must follow this format */ netdev_info(netdev, "NIC Link is Down\n"); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -5569,7 +5584,7 @@ link_up: } /* Reset the timer */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->state)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); } @@ -5950,13 +5965,13 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, __be16 protocol = vlan_get_protocol(skb); if (test_bit(__E1000_DOWN, &adapter->state)) { - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } if (skb->len <= 0) { - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } @@ -5985,7 +6000,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, pull_size = min_t(unsigned int, 4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { e_err("__pskb_pull_tail failed.\n"); - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } @@ -6011,7 +6026,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ - if (!adapter->ecdev && e1000_maybe_stop_tx(tx_ring, count + 2)) + if (!get_ecdev(adapter) && e1000_maybe_stop_tx(tx_ring, count + 2)) return NETDEV_TX_BUSY; if (skb_vlan_tag_present(skb)) { @@ -6024,7 +6039,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tso = e1000_tso(tx_ring, skb, protocol); if (tso < 0) { - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } @@ -6066,14 +6081,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
*/ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { e1000_maybe_stop_tx(tx_ring, (MAX_SKB_FRAGS * DIV_ROUND_UP(PAGE_SIZE, adapter->tx_fifo_limit) + 2)); } - if (adapter->ecdev || !netdev_xmit_more() || + if (get_ecdev(adapter) || !netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { if (check_arbiter_wa_flag(adapter)) e1000e_update_tdt_wa(tx_ring, @@ -6082,7 +6097,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, writel(tx_ring->next_to_use, tx_ring->tail); } } else { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } tx_ring->buffer_info[first].time_stamp = 0; @@ -6184,7 +6199,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) struct e1000_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; /* Jumbo frame support */ @@ -7004,7 +7019,7 @@ static int __e1000_resume(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; u16 aspm_disable_flag = 0; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) @@ -7078,7 +7093,7 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); int rc; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; e1000e_flush_lpic(pdev); @@ -7571,6 +7586,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->hw.mac.type = ei->mac; adapter->max_hw_frame_size = ei->max_hw_frame_size; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->ecdev_initialized = 0; mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); @@ -7705,7 +7721,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error while reading MAC address\n"); +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, adapter->hw.mac.addr); +#else memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); +#endif if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", @@ -7797,12 +7817,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac.type >= e1000_pch_cnp) adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS; - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = 1; + if (get_ecdev(adapter)) { init_irq_work(&adapter->watchdog_kicker, ec_watchdog_kicker); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } adapter->ec_watchdog_jiffies = jiffies; @@ -7874,10 +7895,10 @@ static void e1000_remove(struct pci_dev *pdev) e1000e_ptp_remove(adapter); - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); irq_work_sync(&adapter->watchdog_kicker); - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); } /* The timers may be rescheduled, so explicitly disable them @@ -7901,7 +7922,7 @@ static void e1000_remove(struct pci_dev *pdev) } } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { unregister_netdev(netdev); } diff --git a/devices/e1000e/netdev-6.1-ethercat.c b/devices/e1000e/netdev-6.1-ethercat.c index 01685fa4..1c2209d2 100644 --- 
a/devices/e1000e/netdev-6.1-ethercat.c +++ b/devices/e1000e/netdev-6.1-ethercat.c @@ -34,7 +34,7 @@ char e1000e_driver_name[] = "ec_e1000e"; static inline int check_arbiter_wa_flag(const struct e1000_adapter *adapter) { #ifdef EC_DISABLE_E1000E_WORKAROUND - return !adapter->ecdev && (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA); + return !get_ecdev(adapter) && (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA); #else return adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA; #endif @@ -949,7 +949,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb = buffer_info->skb; - if (!adapter->ecdev) + if (!get_ecdev(adapter)) buffer_info->skb = NULL; prefetch(skb->data - NET_IP_ALIGN); @@ -989,7 +989,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, goto next_desc; } - if (unlikely(!adapter->ecdev && + if (unlikely(!get_ecdev(adapter) && (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL))) { /* recycle */ @@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, * performance for small packets with large amounts * of reassembly being done in the stack */ - if (!adapter->ecdev && length < copybreak) { + if (!get_ecdev(adapter) && length < copybreak) { struct sk_buff *new_skb = napi_alloc_skb(&adapter->napi, length); if (new_skb) { @@ -1040,8 +1040,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); - if (adapter->ecdev) { - ecdev_receive(adapter->ecdev, skb->data, length); + if (get_ecdev(adapter)) { + ecdev_receive(get_ecdev(adapter), skb->data, length); adapter->ec_watchdog_jiffies = jiffies; } else { e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1091,7 +1091,7 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring, buffer_info->dma = 0; } if (buffer_info->skb) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (drop) dev_kfree_skb_any(buffer_info->skb); else @@ -1143,8 +1143,8 @@ static void e1000_print_hw_hang(struct work_struct *work) } /* Real hang detected */ - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_stop_queue(netdev); @@ -1286,12 +1286,12 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) tx_ring->next_to_clean = i; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_completed_queue(netdev, pkts_compl, bytes_compl); } #define TX_WAKE_THRESHOLD 32 - if (!adapter->ecdev && count && netif_carrier_ok(netdev) && + if (!get_ecdev(adapter) && count && netif_carrier_ok(netdev) && e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
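Note on the receive-path hunks (both the 5.14 and 6.1 files): the dispatch is always the same - in EtherCAT mode the frame bytes are copied out to the master with ecdev_receive() and the skb stays in the ring for reuse, so none of the stack-side calls (dev_kfree_skb*(), NAPI hand-off, byte-queue accounting) may run, which is exactly what the !get_ecdev() guards enforce. Condensed sketch of the hand-off (illustration only; copybreak, checksum and error paths elided):

/* Condensed from the e1000_clean_rx_irq() hunks (illustration only). */
static void ec_rx_dispatch_sketch(struct e1000_adapter *adapter,
				  struct sk_buff *skb,
				  union e1000_rx_desc_extended *rx_desc,
				  u32 staterr, unsigned int length)
{
	if (get_ecdev(adapter)) {
		/* Frame goes straight to the master; receiving anything also
		 * counts as "link alive" for the EtherCAT watchdog. */
		ecdev_receive(get_ecdev(adapter), skb->data, length);
		adapter->ec_watchdog_jiffies = jiffies;
	} else {
		/* Normal operation: the network stack consumes the skb. */
		e1000_receive_skb(adapter, adapter->netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);
	}
}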
@@ -1305,7 +1305,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) } } - if (!adapter->ecdev && adapter->detect_tx_hung) { + if (!get_ecdev(adapter) && adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ @@ -1384,7 +1384,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, if (adapter->flags2 & FLAG2_IS_DISCARDING) { e_dbg("Packet Split buffers didn't pick up the full packet\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_irq(skb); } if (staterr & E1000_RXD_STAT_EOP) @@ -1394,7 +1394,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && !(netdev->features & NETIF_F_RXALL))) { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_irq(skb); } goto next_desc; @@ -1404,7 +1404,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, if (!length) { e_dbg("Last part of the packet spanning multiple descriptors\n"); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_irq(skb); } goto next_desc; @@ -1493,8 +1493,8 @@ copydone: cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) adapter->rx_hdr_split++; - if (adapter->ecdev) { - ecdev_receive(adapter->ecdev, skb->data, length); + if (get_ecdev(adapter)) { + ecdev_receive(get_ecdev(adapter), skb->data, length); adapter->ec_watchdog_jiffies = jiffies; } else { e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1503,7 +1503,7 @@ copydone: next_desc: rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); - if (!adapter->ecdev) buffer_info->skb = NULL; + if (!get_ecdev(adapter)) buffer_info->skb = NULL; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) { @@ -1577,7 +1577,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb = buffer_info->skb; - if (!adapter->ecdev) + if (!get_ecdev(adapter)) buffer_info->skb = NULL; ++i; @@ -1603,7 +1603,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, /* recycle both page and skb */ buffer_info->skb = skb; /* an error means any chain goes out the window too */ - if (!adapter->ecdev && rx_ring->rx_skb_top) + if (!get_ecdev(adapter) && rx_ring->rx_skb_top) dev_kfree_skb_irq(rx_ring->rx_skb_top); rx_ring->rx_skb_top = NULL; goto next_desc; @@ -1676,14 +1676,14 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, total_rx_packets++; /* eth type trans needs skb->data to point to something */ - if (!adapter->ecdev && !pskb_may_pull(skb, ETH_HLEN)) { + if (!get_ecdev(adapter) && !pskb_may_pull(skb, ETH_HLEN)) { e_err("pskb_may_pull failed.\n"); dev_kfree_skb_irq(skb); goto next_desc; } - if (adapter->ecdev) { - ecdev_receive(adapter->ecdev, skb->data, length); + if (get_ecdev(adapter)) { + ecdev_receive(get_ecdev(adapter), skb->data, length); adapter->ec_watchdog_jiffies = jiffies; } else { e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1808,7 +1808,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int ec_work_done = 0; adapter->clean_rx(adapter->rx_ring, &ec_work_done, 100); e1000_clean_tx_irq(adapter->tx_ring); @@ -1882,7 +1882,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) struct e1000_hw *hw = 
&adapter->hw; u32 rctl, icr = er32(ICR); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int ec_work_done = 0; adapter->clean_rx(adapter->rx_ring, &ec_work_done, 100); e1000_clean_tx_irq(adapter->tx_ring); @@ -1970,7 +1970,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) if (icr & E1000_ICR_LSC) { hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->state)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } @@ -2017,7 +2017,7 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) rx_ring->set_itr = 0; } - if (adapter->ecdev) { + if (get_ecdev(adapter)) { int ec_work_done = 0; adapter->clean_rx(adapter->rx_ring, &ec_work_done, 100); } else { @@ -2231,7 +2231,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; int err; - if (adapter->ecdev) + if (get_ecdev(adapter)) return 0; if (adapter->msix_entries) { @@ -2266,7 +2266,7 @@ static void e1000_free_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -2300,7 +2300,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) ew32(EIAC_82574, 0); e1e_flush(); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return; } @@ -2322,7 +2322,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->msix_entries) { @@ -3845,7 +3845,7 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000e_setup_rss_hash(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { adapter->alloc_rx_buf(rx_ring, adapter->rx_ring->count, GFP_KERNEL); } else { adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), @@ -4309,7 +4309,7 @@ void e1000e_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (adapter->msix_entries) e1000_configure_msix(adapter); e1000_irq_enable(adapter); @@ -4362,8 +4362,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) */ set_bit(__E1000_DOWN, &adapter->state); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -4375,7 +4375,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ - if (!adapter->ecdev) + if (!get_ecdev(adapter)) netif_stop_queue(netdev); /* disable transmits in the hardware */ @@ -4387,7 +4387,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) e1e_flush(); usleep_range(10000, 11000); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { e1000_irq_disable(adapter); napi_synchronize(&adapter->napi); @@ -4720,8 +4720,8 @@ int e1000e_open(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); netif_stop_queue(netdev); @@ -4769,7 +4769,7 @@ int e1000e_open(struct net_device *netdev) * ignore e1000e MSI messages, which means we need to test our MSI * interrupt now */ - if (!adapter->ecdev && adapter->int_mode != 
E1000E_INT_MODE_LEGACY) { + if (!get_ecdev(adapter) && adapter->int_mode != E1000E_INT_MODE_LEGACY) { err = e1000_test_msi(adapter); if (err) { e_err("Interrupt allocation failed\n"); @@ -4780,7 +4780,7 @@ int e1000e_open(struct net_device *netdev) /* From here on the code is the same as e1000e_up() */ clear_bit(__E1000_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_enable(&adapter->napi); e1000_irq_enable(adapter); @@ -4841,7 +4841,7 @@ int e1000e_close(struct net_device *netdev) netdev_info(netdev, "NIC Link is Down\n"); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_disable(&adapter->napi); } @@ -5312,9 +5312,9 @@ static void e1000_watchdog_task(struct work_struct *work) return; link = e1000e_has_link(adapter); - if ((adapter->ecdev && (ecdev_get_link(adapter->ecdev)) && link) - || (!adapter->ecdev && (netif_carrier_ok(netdev)) && link)) { - if (!adapter->ecdev) { + if ((get_ecdev(adapter) && (ecdev_get_link(get_ecdev(adapter))) && link) + || (!get_ecdev(adapter) && (netif_carrier_ok(netdev)) && link)) { + if (!get_ecdev(adapter)) { /* Cancel scheduled suspend requests. */ pm_runtime_resume(netdev->dev.parent); } @@ -5328,8 +5328,8 @@ static void e1000_watchdog_task(struct work_struct *work) e1000_update_mng_vlan(adapter); if (link) { - if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) - || (!adapter->ecdev && !netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && !ecdev_get_link(get_ecdev(adapter))) + || (!get_ecdev(adapter) && !netif_carrier_ok(netdev))) { bool txb2b = true; /* Cancel scheduled suspend requests. */ @@ -5420,27 +5420,27 @@ static void e1000_watchdog_task(struct work_struct *work) if (phy->ops.cfg_on_link_up) phy->ops.cfg_on_link_up(hw); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 1); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 1); } else { netif_wake_queue(netdev); netif_carrier_on(netdev); } - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->state)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); } } else { - if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) - || (!adapter->ecdev && netif_carrier_ok(netdev))) { + if ((get_ecdev(adapter) && ecdev_get_link(get_ecdev(adapter))) + || (!get_ecdev(adapter) && netif_carrier_ok(netdev))) { adapter->link_speed = 0; adapter->link_duplex = 0; /* Link status message must follow this format */ netdev_info(netdev, "NIC Link is Down\n"); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -5542,7 +5542,7 @@ link_up: } /* Reset the timer */ - if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->state)) + if (!get_ecdev(adapter) && !test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); } @@ -5923,13 +5923,13 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, __be16 protocol = vlan_get_protocol(skb); if (test_bit(__E1000_DOWN, &adapter->state)) { - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } if (skb->len <= 0) { - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } @@ -5958,7 +5958,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, pull_size = min_t(unsigned int, 4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { e_err("__pskb_pull_tail 
failed.\n"); - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } @@ -5984,7 +5984,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ - if (!adapter->ecdev && e1000_maybe_stop_tx(tx_ring, count + 2)) + if (!get_ecdev(adapter) && e1000_maybe_stop_tx(tx_ring, count + 2)) return NETDEV_TX_BUSY; if (skb_vlan_tag_present(skb)) { @@ -5997,7 +5997,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tso = e1000_tso(tx_ring, skb, protocol); if (tso < 0) { - if (!adapter->ecdev) + if (!get_ecdev(adapter)) dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } @@ -6039,14 +6039,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { e1000_maybe_stop_tx(tx_ring, ((MAX_SKB_FRAGS + 1) * DIV_ROUND_UP(PAGE_SIZE, adapter->tx_fifo_limit) + 4)); } - if (adapter->ecdev || !netdev_xmit_more() || + if (get_ecdev(adapter) || !netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { if (check_arbiter_wa_flag(adapter)) e1000e_update_tdt_wa(tx_ring, @@ -6055,7 +6055,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, writel(tx_ring->next_to_use, tx_ring->tail); } } else { - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(skb); } tx_ring->buffer_info[first].time_stamp = 0; @@ -6157,7 +6157,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) struct e1000_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; /* Jumbo frame support */ @@ -7033,7 +7033,7 @@ static int __e1000_resume(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; u16 aspm_disable_flag = 0; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) @@ -7107,7 +7107,7 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); int rc; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; e1000e_flush_lpic(pdev); @@ -7594,6 +7594,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->hw.mac.type = ei->mac; adapter->max_hw_frame_size = ei->max_hw_frame_size; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->ecdev_initialized = 0; mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); @@ -7844,12 +7845,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac.type >= e1000_pch_cnp) adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS; - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = 1; + if (get_ecdev(adapter)) { init_irq_work(&adapter->watchdog_kicker, ec_watchdog_kicker); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } adapter->ec_watchdog_jiffies = jiffies; @@ -7921,10 +7923,10 @@ static void e1000_remove(struct pci_dev *pdev) e1000e_ptp_remove(adapter); - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); + if (get_ecdev(adapter)) { + 
ecdev_close(get_ecdev(adapter)); irq_work_sync(&adapter->watchdog_kicker); - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); } /* The timers may be rescheduled, so explicitly disable them @@ -7948,7 +7950,7 @@ static void e1000_remove(struct pci_dev *pdev) } } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { unregister_netdev(netdev); } diff --git a/devices/generic.c b/devices/generic.c index fac65964..53307bda 100644 --- a/devices/generic.c +++ b/devices/generic.c @@ -41,6 +41,17 @@ #define EC_GEN_RX_BUF_SIZE 1600 +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + /****************************************************************************/ int __init ec_gen_init_module(void); @@ -239,7 +250,7 @@ int ec_gen_device_offer( int ret = 0; dev->used_netdev = desc->netdev; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) || (SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5) eth_hw_addr_set(dev->netdev, desc->dev_addr); #else memcpy(dev->netdev->dev_addr, desc->dev_addr, ETH_ALEN); diff --git a/devices/genet/Kbuild.in b/devices/genet/Kbuild.in index 40c6f698..425808b1 100644 --- a/devices/genet/Kbuild.in +++ b/devices/genet/Kbuild.in @@ -38,6 +38,10 @@ ifeq (@ENABLE_GENET@,1) bcmmii-@KERNEL_GENET@-ethercat.o \ bcmgenet_wol-@KERNEL_GENET@-ethercat.o +ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y := -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING +endif + CFLAGS_bcmgenet-@KERNEL_GENET@-ethercat.o = -DREV="\"$(REV)\"" endif diff --git a/devices/genet/Makefile.am b/devices/genet/Makefile.am index db7bb228..de3c0178 100644 --- a/devices/genet/Makefile.am +++ b/devices/genet/Makefile.am @@ -22,13 +22,33 @@ include $(top_srcdir)/Makefile.kbuild EXTRA_DIST = \ + bcmgenet-5.10-ethercat.c \ + bcmgenet-5.10-ethercat.h \ + bcmgenet-5.10-orig.c \ + bcmgenet-5.10-orig.h \ + bcmgenet-5.14-ethercat.c \ + bcmgenet-5.14-ethercat.h \ + bcmgenet-5.14-orig.c \ + bcmgenet-5.14-orig.h \ bcmgenet-6.1-ethercat.c \ bcmgenet-6.1-ethercat.h \ bcmgenet-6.1-orig.c \ bcmgenet-6.1-orig.h \ + bcmgenet_wol-5.10-ethercat.c \ + bcmgenet_wol-5.10-orig.c \ + bcmgenet_wol-5.14-ethercat.c \ + bcmgenet_wol-5.14-orig.c \ bcmgenet_wol-6.1-ethercat.c \ bcmgenet_wol-6.1-orig.c \ + bcmmii-5.14-ethercat.c \ + bcmmii-5.14-orig.c \ + bcmmii-5.10-ethercat.c \ + bcmmii-5.10-orig.c \ bcmmii-6.1-ethercat.c \ - bcmmii-6.1-orig.c + bcmmii-6.1-orig.c \ + unimac-5.14-ethercat.h \ + unimac-5.14-orig.h \ + unimac-6.1-ethercat.h \ + unimac-6.1-orig.h #------------------------------------------------------------------------------ diff --git a/devices/genet/bcmgenet-5.10-ethercat.c b/devices/genet/bcmgenet-5.10-ethercat.c new file mode 100644 index 00000000..16835df3 --- /dev/null +++ b/devices/genet/bcmgenet-5.10-ethercat.c @@ -0,0 +1,4425 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcmgenet-5.10-ethercat.h" + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default 
highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. + */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl(offset); +} + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. 
+ */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, + DMA_RING0_TIMEOUT, + DMA_RING1_TIMEOUT, + DMA_RING2_TIMEOUT, + DMA_RING3_TIMEOUT, + DMA_RING4_TIMEOUT, + DMA_RING5_TIMEOUT, + DMA_RING6_TIMEOUT, + DMA_RING7_TIMEOUT, + DMA_RING8_TIMEOUT, + DMA_RING9_TIMEOUT, + DMA_RING10_TIMEOUT, + DMA_RING11_TIMEOUT, + DMA_RING12_TIMEOUT, + DMA_RING13_TIMEOUT, + DMA_RING14_TIMEOUT, + DMA_RING15_TIMEOUT, + DMA_RING16_TIMEOUT, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x2C, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + [DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + 
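+ /* note: unlike the v2/v3+ maps above, the v1 map has no DMA_RING_CFG entry */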
[DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { + TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. 
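+ * Compare the two tables below: TDMA_CONS_INDEX sits at 0x08 on v4, after the
+ * TDMA_READ_PTR_HI word, but at 0x04 on v1/v2/v3, which carry no HI registers.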
+ */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +} + +static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset, reg, reg1; + + offset = HFB_FLT_ENABLE_V3PLUS; + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + if (f_index < 32) { + reg1 &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); + } else { + reg &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + } + if (!reg && !reg1) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= 
((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_validate_mask(void *mask, size_t size) +{ + while (size) { + switch (*(unsigned char *)mask++) { + case 0x00: + case 0x0f: + case 0xf0: + case 0xff: + size--; + continue; + default: + return -EINVAL; + } + } + + return 0; +} + +#define VALIDATE_MASK(x) \ + bcmgenet_hfb_validate_mask(&(x), sizeof(x)) + +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) +{ + u32 index, tmp; + + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); + + while (size--) { + if (offset++ & 1) { + tmp &= ~0x300FF; + tmp |= (*(unsigned char *)val++); + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0x30000; + break; + case 0xF0: + tmp |= 0x20000; + break; + case 0x0F: + tmp |= 0x10000; + break; + } + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); + if (size) + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); + } else { + tmp &= ~0xCFF00; + tmp |= (*(unsigned char *)val++) << 8; + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0xC0000; + break; + case 0xF0: + tmp |= 0x80000; + break; + case 0x0F: + tmp |= 0x40000; + break; + } + if (!size) + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); + } + } + + return 0; +} + +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) +{ + struct ethtool_rx_flow_spec *fs = &rule->fs; + u32 offset = 0, f_length = 0, f; + u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; + size_t size; + + f = fs->location; + if (fs->flow_type & FLOW_MAC_EXT) { + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_ext.h_dest, &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if (fs->flow_type & FLOW_EXT) { + if (fs->m_ext.vlan_etype || + fs->m_ext.vlan_tci) { + bcmgenet_hfb_insert_data(priv, f, 12, + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmgenet_hfb_insert_data(priv, f, 14, + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + f_length += DIV_ROUND_UP(VLAN_HLEN, 2); + } + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN, 2); + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + break; + case IP_USER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2); + /* Specify IP Ether Type */ + val_16 = htons(ETH_P_IP); + mask_16 = 0xFFFF; + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmgenet_hfb_insert_data(priv, f, 15 + offset, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmgenet_hfb_insert_data(priv, f, 23 + offset, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmgenet_hfb_insert_data(priv, f, 26 + offset, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmgenet_hfb_insert_data(priv, 
f, 30 + offset, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if (!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, + &val_8, &mask_8, + sizeof(val_8)); + size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); + bcmgenet_hfb_insert_data(priv, f, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + size); + f_length += DIV_ROUND_UP(size, 2); + break; + } + + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); + if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { + /* Ring 0 flows can be handled by the default Descriptor Ring + * We'll map them to ring 0, but don't enable the filter + */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); + rule->state = BCMGENET_RXNFC_STATE_DISABLED; + } else { + /* Other Rx rings are direct mapped here */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); + rule->state = BCMGENET_RXNFC_STATE_ENABLED; + } +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. + */ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + int i; + + INIT_LIST_HEAD(&priv->rxnfc_list); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); + priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; + } + + bcmgenet_hfb_clear(priv); +} + +static int bcmgenet_begin(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn on the clock */ + return clk_prepare_enable(priv->clk); +} + +static void bcmgenet_complete(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn off the clock */ + clk_disable_unprepare(priv->clk); +} + +static int bcmgenet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + phy_ethtool_ksettings_get(dev->phydev, cmd); + + return 0; +} + +static int bcmgenet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(dev->phydev, cmd); +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + int 
ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +static int bcmgenet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; + + ec->tx_max_coalesced_frames = + bcmgenet_tdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_max_coalesced_frames = + bcmgenet_rdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_coalesce_usecs = + bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + + return 0; +} + +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +static int bcmgenet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int i; + + /* Base system clock is 125Mhz, DMA timeout is this reference clock + * divided by 1024, which yields roughly 8.192us, our maximum value + * has to fit in the DMA_TIMEOUT_MASK (16 bits) + */ + if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->tx_max_coalesced_frames == 0 || + ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) + return -EINVAL; + + /* GENET TDMA hardware does not support a configurable timeout, but will + * always generate an interrupt either after MBDONE packets have been + * transmitted, or when the ring is empty. 
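+ * The RX timeout, by contrast, is programmed in 8.192 us ticks (125 MHz / 1024):
+ * e.g. rx_coalesce_usecs = 50 is rounded up to DIV_ROUND_UP(50 * 1000, 8192) = 7
+ * ticks, i.e. roughly 57.3 us, in bcmgenet_set_rx_coalesce() above.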
+ */ + + /* Program all TX queues with the same values, as there is no + * ethtool knob to do coalescing on a per-queue basis + */ + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tdma_ring_writel(priv, i, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + bcmgenet_tdma_ring_writel(priv, DESC_INDEX, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); + + return 0; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + +#define STAT_GENET_Q(num) \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ + tx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ + tx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ + rx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ + rx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ + rx_rings[num].errors), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \ + rx_rings[num].dropped) + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + 
STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT_V1), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, + UMAC_RBUF_ERR_CNT_V1), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("tx_realloc_tsb", 
mib.tx_realloc_tsb), + STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), + /* Per TX queues */ + STAT_GENET_Q(0), + STAT_GENET_Q(1), + STAT_GENET_Q(2), + STAT_GENET_Q(3), + STAT_GENET_Q(16), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset) +{ + u16 new_offset; + u32 val; + + switch (offset) { + case UMAC_RBUF_OVFL_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_OVFL_CNT_V2; + else + new_offset = RBUF_OVFL_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + case UMAC_RBUF_ERR_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_ERR_CNT_V2; + else + new_offset = RBUF_ERR_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + default: + val = bcmgenet_umac_readl(priv, offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, offset); + break; + } + + return val; +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: + continue; + case BCMGENET_STAT_RUNT: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_TX: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_RX: + val = bcmgenet_umac_readl(priv, + UMAC_MIB_START + j + offset); + offset = 0; /* Reset Offset */ + break; + case BCMGENET_STAT_MISC: + if (GENET_IS_V1(priv)) { + val = bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, + s->reg_offset); + } else { + val = bcmgenet_update_stat_misc(priv, + s->reg_offset); + } + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + dev->netdev_ops->ndo_get_stats(dev); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } +} + +static void bcmgenet_eee_enable_set(struct net_device 
*dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = bcmgenet_readl(priv->base + off); + if (enable) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + bcmgenet_writel(reg, priv->base + off); + + /* Do the same for thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + int ret = 0; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false); + } else { + ret = phy_init_eee(dev->phydev, 0); + if (ret) { + netif_err(priv, hw, dev, "EEE initialization failed\n"); + return ret; + } + + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, true); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static int bcmgenet_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) { + netdev_err(dev, "rxnfc: Invalid location (%d)\n", + cmd->fs.location); + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case IP_USER_FLOW: + l4_mask = &cmd->fs.m_u.usr_ip4_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(l4_mask->ip4src) || + VALIDATE_MASK(l4_mask->ip4dst) || + VALIDATE_MASK(l4_mask->l4_4_bytes) || + VALIDATE_MASK(l4_mask->proto) || + VALIDATE_MASK(l4_mask->ip_ver) || + VALIDATE_MASK(l4_mask->tos)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(eth_mask->h_dest) || + VALIDATE_MASK(eth_mask->h_source) || + VALIDATE_MASK(eth_mask->h_proto)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + default: + netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n", + cmd->fs.flow_type); + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || + 
VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { + netdev_err(dev, "rxnfc: user-def not supported\n"); + return -EINVAL; + } + } + + if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + } + + return 0; +} + +static int bcmgenet_insert_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *loc_rule; + int err; + + if (priv->hw_params->hfb_filter_size < 128) { + netdev_err(dev, "rxnfc: Not supported by this device\n"); + return -EINVAL; + } + + if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && + cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { + netdev_err(dev, "rxnfc: Unsupported action (%llu)\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + + err = bcmgenet_validate_flow(dev, cmd); + if (err) + return err; + + loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memcpy(&loc_rule->fs, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); + + list_add_tail(&loc_rule->list, &priv->rxnfc_list); + + return 0; +} + +static int bcmgenet_delete_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[cmd->fs.location]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { + err = -ENOENT; + goto out; + } + + if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); + +out: + return err; +} + +static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = bcmgenet_insert_flow(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = bcmgenet_delete_flow(dev, cmd); + break; + default: + netdev_warn(priv->dev, "Unsupported ethtool command. 
(%d)\n", + cmd->cmd); + return -EINVAL; + } + + return err; +} + +static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[loc]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) + err = -ENOENT; + else + memcpy(&cmd->fs, &rule->fs, + sizeof(struct ethtool_rx_flow_spec)); + + return err; +} + +static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) +{ + struct list_head *pos; + int res = 0; + + list_for_each(pos, &priv->rxnfc_list) + res++; + + return res; +} + +static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + int i = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->hw_params->rx_queues ?: 1; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmgenet_get_num_flows(priv); + cmd->data = MAX_NUM_OF_FS_RULES; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (i < cmd->rule_cnt) + rule_locs[i++] = rule->fs.location; + cmd->rule_cnt = i; + cmd->data = MAX_NUM_OF_FS_RULES; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/* standard ethtool support functions. */ +static const struct ethtool_ops bcmgenet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .begin = bcmgenet_begin, + .complete = bcmgenet_complete, + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, + .get_wol = bcmgenet_get_wol, + .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = phy_ethtool_nway_reset, + .get_coalesce = bcmgenet_get_coalesce, + .set_coalesce = bcmgenet_set_coalesce, + .get_link_ksettings = bcmgenet_get_link_ksettings, + .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, + .get_rxnfc = bcmgenet_get_rxnfc, + .set_rxnfc = bcmgenet_set_rxnfc, +}; + +/* Power down the unimac, based on mode. 
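+ * GENET_POWER_CABLE_SENSE detaches the PHY, GENET_POWER_WOL_MAGIC defers to the
+ * Wake-on-LAN helpers, and GENET_POWER_PASSIVE additionally gates the PHY power
+ * rails and LED via EXT_EXT_PWR_MGMT when the EXT block is present.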
*/ +static int bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->dev->phydev); + break; + + case GENET_POWER_WOL_MAGIC: + ret = bcmgenet_wol_power_down_cfg(priv, mode); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + if (GENET_IS_V5(priv)) + reg |= EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR; + else + reg |= EXT_PWR_DOWN_PHY; + + reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); + } + break; + default: + break; + } + + return ret; +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); + if (GENET_IS_V5(priv)) { + reg &= ~(EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR); + reg |= EXT_PHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + mdelay(1); + + reg &= ~EXT_PHY_RESET; + } else { + reg &= ~EXT_PWR_DOWN_PHY; + reg |= EXT_PWR_DN_EN_LD; + } + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_phy_power_set(priv->dev, true); + break; + + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + if (!GENET_IS_V5(priv)) { + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + break; + case GENET_POWER_WOL_MAGIC: + bcmgenet_wol_power_up_cfg(priv, mode); + return; + default: + break; + } +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Rewinding local write pointer */ + if (ring->write_ptr == ring->cb_ptr) + ring->write_ptr = ring->end_ptr; + else + ring->write_ptr--; + + return tx_cb_ptr; +} + +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring 
*ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_SET); +} + +/* Simple helper to free a transmit control block's resources + * Returns an skb when the last transmit control block associated with the + * skb is freed. The skb should be freed by the caller if necessary. + */ +static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + + if (skb) { + cb->skb = NULL; + if (cb == GENET_CB(skb)->first_cb) + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + + if (cb == GENET_CB(skb)->last_cb) + return skb; + + } else if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_page(dev, + dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return NULL; +} + +/* Simple helper to free a receive control block's resources */ +static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + cb->skb = NULL; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return skb; +} + +/* Unlocked version of the reclaim routine */ +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int txbds_processed = 0; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + unsigned int txbds_ready; + unsigned int c_index; + struct sk_buff *skb; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_CLEAR); + else + bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + INTRL2_CPU_CLEAR); + + /* Compute how many buffers are transmitted since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) + & DMA_C_INDEX_MASK; + txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); + + /* Reclaim transmitted buffers */ + while (txbds_processed < txbds_ready) { + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, + &priv->tx_cbs[ring->clean_ptr]); + if (skb) { + pkts_compl++; + bytes_compl += GENET_CB(skb)->bytes_sent; + if (!priv->ecdev) + dev_consume_skb_any(skb); + } + + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; + } + + ring->free_bds += txbds_processed; + ring->c_index = c_index; + + ring->packets += pkts_compl; + ring->bytes += bytes_compl; + + if (!priv->ecdev) + 
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + pkts_compl, bytes_compl); + + return txbds_processed; +} + +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned int released; + + spin_lock_bh(&ring->lock); + released = __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_bh(&ring->lock); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + struct netdev_queue *txq; + + if (ring->priv->ecdev) + return 0; + + spin_lock(&ring->lock); + work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + netif_tx_wake_queue(txq); + } + spin_unlock(&ring->lock); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 ip_proto; + __be16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. + */ + BUG_ON(priv->ecdev); + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + if (!new_skb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; + dev->stats.tx_dropped++; + return NULL; + } + dev_consume_skb_any(skb); + skb = new_skb; + priv->mib.tx_realloc_tsb++; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = skb->protocol; + switch (ip_ver) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* don't use UDP flag */ + ip_proto = 0; + break; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset) | + STATUS_TX_CSUM_LV; + + /* Set the special UDP flag for UDP */ + if (ip_proto == IPPROTO_UDP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + + status->tx_csum_info = tx_csum_info; + } + + return skb; +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_tx_ring *ring = NULL; + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + int nr_frags, index; + dma_addr_t mapping; + unsigned int size; + skb_frag_t *frag; + u32 len_stat; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. 
(highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. + * queue_mapping = 4, goes to ring 3. + */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + nr_frags = skb_shinfo(skb)->nr_frags; + + spin_lock(&ring->lock); + if (ring->free_bds <= (nr_frags + 1)) { + if (!priv->ecdev && !netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Retain how many bytes will be sent on the wire, without TSB inserted + * by transmit checksum offload + */ + GENET_CB(skb)->bytes_sent = skb->len; + + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + + for (i = 0; i <= nr_frags; i++) { + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + BUG_ON(!tx_cb_ptr); + + if (!i) { + /* Transmit single SKB or head of fragment list */ + GENET_CB(skb)->first_cb = tx_cb_ptr; + size = skb_headlen(skb); + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + /* xmit fragment */ + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + ret = NETDEV_TX_OK; + goto out_unmap_frags; + } + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, size); + + tx_cb_ptr->skb = skb; + + len_stat = (size << DMA_BUFLENGTH_SHIFT) | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ + if (!i) { + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; + } + if (i == nr_frags) + len_stat |= DMA_EOP; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); + } + + GENET_CB(skb)->last_cb = tx_cb_ptr; + if (priv->ecdev) { + /* give back headroom for next ethercat invocation */ + __skb_pull(skb, sizeof(struct status_64)); + } + skb_tx_timestamp(skb); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + if (!priv->ecdev) { + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) + netif_tx_stop_queue(txq); + } + + if (priv->ecdev || !netdev_xmit_more() || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); +out: + spin_unlock(&ring->lock); + + return ret; + +out_unmap_frags: + /* Back up for failed control block mapping */ + bcmgenet_put_txcb(priv, ring); + + /* Unmap successfully mapped control blocks */ + while (i-- > 0) { + tx_cb_ptr = bcmgenet_put_txcb(priv, ring); + bcmgenet_free_tx_cb(kdev, tx_cb_ptr); + } + + if (!priv->ecdev) + dev_kfree_skb(skb); + goto out; +} + +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + struct sk_buff *rx_skb; + dma_addr_t mapping; + + /* Allocate a new Rx skb */ + skb = 
__netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } + + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + dev_kfree_skb_any(skb); + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; + } + + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = bcmgenet_free_rx_cb(kdev, cb); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + + /* Return the current Rx skb to caller */ + return rx_skb; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, + unsigned int budget) +{ + struct bcmgenet_priv *priv = ring->priv; + struct net_device *dev = priv->dev; + struct device *kdev = &priv->pdev->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 0; + unsigned int p_index, mask; + unsigned int discards; + + if (!priv->ecdev) { + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) { + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_CLEAR); + } else { + mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); + bcmgenet_intrl2_1_writel(priv, + mask, + INTRL2_CPU_CLEAR); + } + } + + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + ring->errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + + p_index &= DMA_P_INDEX_MASK; + rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + + cb = &priv->rx_cbs[ring->read_ptr]; + if (priv->ecdev) + /* DMA unmap current skb */ + skb = bcmgenet_free_rx_cb(kdev, cb); + else + skb = bcmgenet_rx_refill(priv, cb); + + if (unlikely(!skb)) { + ring->dropped++; + goto next; + } + + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { + rx_csum = (__force __be16)(status->rx_csum & 0xffff); + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, 
p_index, ring->c_index, + ring->read_ptr, dma_length_status); + + if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { + netif_err(priv, rx_status, dev, + "dropping fragmented packet!\n"); + ring->errors++; + if (!priv->ecdev) + dev_kfree_skb_any(skb); + goto next; + } + + /* report errors */ + if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER))) { + netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", + (unsigned int)dma_flag); + if (dma_flag & DMA_RX_CRC_ERROR) + dev->stats.rx_crc_errors++; + if (dma_flag & DMA_RX_OV) + dev->stats.rx_over_errors++; + if (dma_flag & DMA_RX_NO) + dev->stats.rx_frame_errors++; + if (dma_flag & DMA_RX_LG) + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (!priv->ecdev) + dev_kfree_skb_any(skb); + goto next; + } /* error packet */ + + if (!priv->ecdev) { + skb_put(skb, len); + + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + } + len -= 66; + + if (priv->crc_fwd_en) { + if (!priv->ecdev) + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + bytes_processed += len; + + /*Finish setting up the received SKB and send it to the kernel*/ + skb->protocol = eth_type_trans(skb, priv->dev); + ring->packets++; + ring->bytes += len; + if (dma_flag & DMA_RX_MULT) + dev->stats.multicast++; + + if (priv->ecdev) { + dma_addr_t mapping; + /* skip status block and padding in skb */ + const unsigned char *data = skb->data + 66; + + ecdev_receive(priv->ecdev, data, len); + + /* remap skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + dev_kfree_skb_any(skb); + } else { + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + } + } else { + /* Notify kernel */ + napi_gro_receive(&ring->napi, skb); + } + netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +next: + rxpktprocessed++; + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); + } + + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + + return rxpktprocessed; +} + +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + struct dim_sample dim_sample = {}; + unsigned int work_done; + + if (ring->priv->ecdev) + return 0; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete_done(napi, work_done); + ring->int_enable(ring); + } + + if (ring->dim.use_dim) { + dim_update_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + + return work_done; +} + + +/** +* ec_poll - EtherCAT poll routine +* @netdev: net device structure +* +* This function can never fail. 
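+* It reclaims completed Tx descriptors on all rings and then polls every
+* Rx ring, including the default descriptor ring 16, so the EtherCAT
+* master can drive the device cyclically instead of via IRQ/NAPI.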
+* +**/ +static void ec_poll(struct net_device *netdev) +{ + struct bcmgenet_priv *priv = netdev_priv(netdev); + unsigned i; + int budget = 64; + + bcmgenet_tx_reclaim_all(netdev); + for (i = 0; i < priv->hw_params->rx_queues; i++) { + bcmgenet_desc_rx(&priv->rx_rings[i], budget); + } + + bcmgenet_desc_rx(&priv->rx_rings[DESC_INDEX], budget); +} + +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct dim_cq_moder cur_profile = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = DIM_START_MEASURE; +} + +/* Assign skb to RX DMA descriptor. */ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) +{ + struct enet_cb *cb; + struct sk_buff *skb; + int i; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_consume_skb_any(skb); + if (!cb->skb) + return -ENOMEM; + } + + return 0; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct sk_buff *skb; + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); + if (skb) + dev_consume_skb_any(skb); + } +} + +static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) +{ + u32 reg; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; + if (enable) + reg |= mask; + else + reg &= ~mask; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-size packet + * to be processed + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void reset_umac(struct bcmgenet_priv *priv) +{ + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + udelay(2); +} + +static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) +{ + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); +} + +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + +static void init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + u32 reg; + u32 int0_enable = 0; + + dev_dbg(&priv->pdev->dev, 
"bcmgenet: init_umac\n"); + + reset_umac(priv); + + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, + UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx registers, enable ip header optimization and RSB */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if (priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + bcmgenet_intr_disable(priv); + + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) { + int0_enable |= UMAC_IRQ_MDIO_ERROR; + if (!priv->ecdev) + int0_enable |= UMAC_IRQ_MDIO_DONE; + } + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + + dev_dbg(kdev, "done init umac\n"); +} + +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ring->priv->ecdev) + return; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_rx_moderation(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +/* Initialize a Tx ring along with corresponding hardware registers */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + + spin_lock_init(&ring->lock); + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + start_ptr; + ring->size = size; + ring->clean_ptr = start_ptr; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = 
start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_WRITE_PTR); + bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + if (priv->ecdev) + return; + /* Initialize Tx NAPI */ + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) + return ret; + + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + + if (!priv->ecdev) { + /* Initialize Rx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, + NAPI_POLL_WEIGHT); + } + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << + DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + return ret; +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if 
(priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx queues + * + * Queues 0-3 are priority-based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. + * + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] + */ +static void bcmgenet_init_tx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; + u32 dma_priority[3] = {0, 0, 0}; + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + /* Initialize Tx priority queues */ + for (i = 0; i < priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + } + + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); + } + + ring = &priv->rx_rings[DESC_INDEX]; 
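+	/* finally quiesce the default descriptor ring (16) as well */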
+ napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. + */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Enable rings */ + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + u32 dma_ctrl; + int i; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= 
~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + return ret; +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + struct netdev_queue *txq; + int i; + + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i)); + + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } + + /* Initialize common TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->tx_cbs) { + kfree(priv->rx_cbs); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } + + /* Init rDma */ + bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } + + /* Init tDma */ + bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); + + return 0; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ + unsigned int status; + struct bcmgenet_priv *priv = container_of( + work, struct bcmgenet_priv, bcmgenet_irq_work); + + netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + + spin_lock_irq(&priv->lock); + status = priv->irq0_stat; + priv->irq0_stat = 0; + spin_unlock_irq(&priv->lock); + + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + + /* Link UP/DOWN event */ + if (status & UMAC_IRQ_LINK_EVENT) + phy_mac_interrupt(priv->dev->phydev); + +} + +/* bcmgenet_isr1: handle Rx and Tx priority queues */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int index, status; + + /* Read irq status */ + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "%s: IRQ=0x%x\n", __func__, status); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < 
priv->hw_params->rx_queues; index++) { + if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(status & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + return IRQ_HANDLED; +} + +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int status; + unsigned long flags; + + /* Read irq status */ + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "IRQ=0x%x\n", status); + + if (status & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + if (status & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + wake_up(&priv->wq); + } + + /* all other interested interrupts handled in bottom half */ + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); + if (status) { + /* Save irq status for bottom-half processing. 
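+		 * Only link and PHY-detect events reach this point; they are
+		 * handled outside interrupt context by bcmgenet_irq_task().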
*/ + spin_lock_irqsave(&priv->lock, flags); + priv->irq0_stat |= status; + spin_unlock_irqrestore(&priv->lock, flags); + + schedule_work(&priv->bcmgenet_irq_work); + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) +{ + /* Acknowledge the interrupt */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); + bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); +} + +static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + u32 addr_tmp; + + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); + put_unaligned_be32(addr_tmp, &addr[0]); + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1); + put_unaligned_be16(addr_tmp, &addr[4]); +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + unsigned int i; + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_netif_start(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ + bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); + + bcmgenet_enable_tx_napi(priv); + + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + + phy_start(dev->phydev); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + clk_prepare_enable(priv->clk); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + 
*/ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + init_umac(priv); + + /* Apply features again in case we changed them while interface was + * down + */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + /* HFB init */ + bcmgenet_hfb_init(priv); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } + + bcmgenet_netif_start(dev); + + if (!priv->ecdev) + netif_tx_start_all_queues(dev); + + return 0; + +err_irq1: + free_irq(priv->irq1, priv); +err_irq0: + free_irq(priv->irq0, priv); +err_fini_dma: + bcmgenet_dma_teardown(priv); + bcmgenet_fini_dma(priv); +err_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static void bcmgenet_netif_stop(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + bcmgenet_disable_tx_napi(priv); + netif_tx_disable(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + phy_stop(dev->phydev); + bcmgenet_disable_rx_napi(priv); + bcmgenet_intr_disable(priv); + + /* Wait for pending work items to complete. Since interrupts are + * disabled no new work will be scheduled. 
+ */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + bcmgenet_netif_stop(dev); + + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(dev->phydev); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock(&ring->lock); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock(&ring->lock); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? 
"enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + +static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + netif_trans_update(dev); + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MDF_FILTER 17 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + unsigned char *addr, + int *i) +{ + bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], + UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + *i += 2; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, nfilter; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Number of filters needed */ + nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; + + /* + * Turn on promicuous mode for three scenarios + * 1. IFF_PROMISC flag is set + * 2. IFF_ALLMULTI flag is set + * 3. The number of filters needed exceeds the number filters + * supported by the hardware. + */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || + (nfilter > MAX_MDF_FILTER)) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* update MDF filter */ + i = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); + + /* Unicast */ + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Multicast */ + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Enable filters */ + reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); +} + +/* Set the hardware MAC address. */ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. 
+ */ + if (netif_running(dev)) + return -EBUSY; + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + +static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long tx_bytes = 0, tx_packets = 0; + unsigned long rx_bytes = 0, rx_packets = 0; + unsigned long rx_errors = 0, rx_dropped = 0; + struct bcmgenet_tx_ring *tx_ring; + struct bcmgenet_rx_ring *rx_ring; + unsigned int q; + + for (q = 0; q < priv->hw_params->tx_queues; q++) { + tx_ring = &priv->tx_rings[q]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + } + tx_ring = &priv->tx_rings[DESC_INDEX]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + + for (q = 0; q < priv->hw_params->rx_queues; q++) { + rx_ring = &priv->rx_rings[q]; + + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + } + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_packets = rx_packets; + dev->stats.rx_errors = rx_errors; + dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; + return &dev->stats; +} + +static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || + priv->phy_interface != PHY_INTERFACE_MODE_MOCA) + return -EOPNOTSUPP; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif + .ndo_get_stats = bcmgenet_get_stats, + .ndo_change_carrier = bcmgenet_change_carrier, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + 
GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V4] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V5] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + u16 gphy_rev; + + if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 6) + major = 5; + else if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. + */ + gphy_rev = reg & 0xffff; + + if (GENET_IS_V5(priv)) { + /* The EPHY revision should come from the MDIO registers of + * the PHY not from GENET. 
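+		 * A non-zero value read here is therefore only reported with a
+		 * warning and otherwise ignored.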
+ */ + if (gphy_rev != 0) { + pr_warn("GENET is reporting EPHY revision: 0x%04x\n", + gphy_rev); + } + /* This is reserved so should require special treatment */ + } else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + /* This is the good old scheme, just GPHY major, no minor nor patch */ + } else if ((gphy_rev & 0xf0) != 0) { + priv->gphy_rev = gphy_rev << 8; + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + } else if ((gphy_rev & 0xff00) != 0) { + priv->gphy_rev = gphy_rev; + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +struct bcmgenet_plat_data { + enum bcmgenet_version version; + u32 dma_max_burst_length; +}; + +static const struct bcmgenet_plat_data v1_plat_data = { + .version = GENET_V1, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v2_plat_data = { + .version = GENET_V2, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v3_plat_data = { + .version = GENET_V3, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v4_plat_data = { + .version = GENET_V4, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v5_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data bcm2711_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = 0x08, +}; + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, + { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, + { .compatible = "brcm,genet-v3", .data = &v3_plat_data }, + { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, + { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, + { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcmgenet_match); + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; + const struct bcmgenet_plat_data *pdata; + struct bcmgenet_priv *priv; + struct net_device *dev; + unsigned int i; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + if (priv->irq0 < 0) { + err = priv->irq0; + goto err; + } + priv->irq1 = platform_get_irq(pdev, 1); + if (priv->irq1 < 0) { + err = priv->irq1; + goto err; + } + priv->wol_irq = platform_get_irq_optional(pdev, 2); + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if 
(IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + spin_lock_init(&priv->lock); + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + dev->watchdog_timeo = 2 * HZ; + dev->ethtool_ops = &bcmgenet_ethtool_ops; + dev->netdev_ops = &bcmgenet_netdev_ops; + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set default features */ + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = true; + if (priv->wol_irq > 0) { + err = devm_request_irq(&pdev->dev, priv->wol_irq, + bcmgenet_wol_isr, 0, dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + } + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + netdev_boot_setup_check(dev); + + priv->dev = dev; + priv->pdev = pdev; + + pdata = device_get_match_data(&pdev->dev); + if (pdata) { + priv->version = pdata->version; + priv->dma_max_burst_length = pdata->dma_max_burst_length; + } else { + priv->version = pd->genet_version; + priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; + } + + priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) { + dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); + err = PTR_ERR(priv->clk); + goto err; + } + + err = clk_prepare_enable(priv->clk); + if (err) + goto err; + + bcmgenet_set_hw_params(priv); + + err = -EIO; + if (priv->hw_params->flags & GENET_HAS_40BITS) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_clk_disable; + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); + err = PTR_ERR(priv->clk_wol); + goto err_clk_disable; + } + + priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); + err = PTR_ERR(priv->clk_eee); + goto err_clk_disable; + } + + /* If this is an internal GPHY, power it on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + if (pd && !IS_ERR_OR_NULL(pd->mac_address)) + ether_addr_copy(dev->dev_addr, pd->mac_address); + else + if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN)) + if (has_acpi_companion(&pdev->dev)) + bcmgenet_get_hw_addr(priv, dev->dev_addr); + + if (!is_valid_ether_addr(dev->dev_addr)) { + dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + eth_hw_addr_random(dev); + } + + reset_umac(priv); + + err = bcmgenet_mii_init(dev); + if (err) + goto err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + /* Set default coalescing 
parameters */ + for (i = 0; i < priv->hw_params->rx_queues; i++) + priv->rx_rings[i].rx_max_coalesced_frames = 1; + priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; + + /* libphy will determine the link state */ + netif_carrier_off(dev); + + /* Turn off the main clock, WOL clock is handled separately */ + clk_disable_unprepare(priv->clk); + + priv->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + if (priv->ecdev) { + err = ecdev_open(priv->ecdev); + if (err) { + ecdev_withdraw(priv->ecdev); + priv->ecdev = NULL; + bcmgenet_mii_exit(dev); + goto err; + } + } else { + err = register_netdev(dev); + if (err) { + bcmgenet_mii_exit(dev); + goto err; + } + } + + return err; + +err_clk_disable: + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + if (priv->ecdev) { + ecdev_close(priv->ecdev); + ecdev_withdraw(priv->ecdev); + priv->ecdev = NULL; + } else + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + +static void bcmgenet_shutdown(struct platform_device *pdev) +{ + bcmgenet_remove(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int bcmgenet_resume_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + u32 reg; + + if (priv->ecdev) + ecdev_open(priv->ecdev); + else if (!netif_running(dev)) + return 0; + + /* Turn on the clock */ + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + if (device_may_wakeup(d) && priv->wolopts) { + /* Account for Wake-on-LAN events and clear those events + * (Some devices need more time between enabling the clocks + * and the interrupt register reflecting the wake event so + * read the register twice) + */ + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + if (reg & UMAC_IRQ_WAKE_EVENT) + pm_wakeup_event(&priv->pdev->dev, 0); + } + + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); + + return 0; +} + +static int bcmgenet_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + unsigned long dma_ctrl; + int ret; + + if (!netif_running(dev)) + return 0; + + /* From WOL-enabled suspend, switch to regular clock */ + if (device_may_wakeup(d) && priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + bcmgenet_umac_reset(priv); + + init_umac(priv); + + phy_init_hw(dev->phydev); + + /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); + bcmgenet_mii_config(priv->dev, false); + + /* Restore enabled features */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + 
netdev_err(dev, "failed to initialize DMA\n"); + goto out_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + if (!device_may_wakeup(d)) + phy_resume(dev->phydev); + + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); + + bcmgenet_netif_start(dev); + + if (priv->ecdev) + ecdev_open(priv->ecdev); + else + netif_device_attach(dev); + + return 0; + +out_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (priv->ecdev) { + ecdev_close(priv->ecdev); + } else { + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + } + + bcmgenet_netif_stop(dev); + + if (!device_may_wakeup(d)) + phy_suspend(dev->phydev); + + /* Disable filtering */ + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); + + return 0; +} + +static int bcmgenet_suspend_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + if (priv->ecdev) { + ecdev_close(priv->ecdev); + } else if (!netif_running(dev)) + return 0; + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + else if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + /* Let the framework handle resumption and leave the clocks on */ + if (ret) + return ret; + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + return 0; +} +#else +#define bcmgenet_suspend NULL +#define bcmgenet_suspend_noirq NULL +#define bcmgenet_resume NULL +#define bcmgenet_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops bcmgenet_pm_ops = { + .suspend = bcmgenet_suspend, + .suspend_noirq = bcmgenet_suspend_noirq, + .resume = bcmgenet_resume, + .resume_noirq = bcmgenet_resume_noirq, +}; + +static const struct acpi_device_id genet_acpi_match[] = { + { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, genet_acpi_match); + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .shutdown = bcmgenet_shutdown, + .driver = { + .name = "ec_bcmgenet", + .of_match_table = bcmgenet_match, + .pm = &bcmgenet_pm_ops, + .acpi_match_table = genet_acpi_match, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver " + "(EtherCAT enabled, Version " REV ")"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: mdio-bcm-unimac"); diff --git a/devices/genet/bcmgenet-5.10-ethercat.h b/devices/genet/bcmgenet-5.10-ethercat.h new file mode 100644 index 00000000..d9aa8b95 --- /dev/null +++ b/devices/genet/bcmgenet-5.10-ethercat.h @@ -0,0 +1,764 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2020 Broadcom + */ + +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* EtherCAT header file */ +#include "../ecdev.h" + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. 
+ * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. configuration */ +#define MAX_NUM_OF_FS_RULES 16 +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. */ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0X7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* 
RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; +}; + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F + +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define UMAC_SPEED_10 0 +#define UMAC_SPEED_100 1 +#define UMAC_SPEED_1000 2 +#define UMAC_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) + +#define UMAC_MAC0 0x00C +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 + +#define UMAC_MODE 0x44 +#define MODE_LINK_STATUS (1 << 5) + +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) + +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff + +#define UMAC_TX_FLUSH 0x334 + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT_V1 0x61C +#define RBUF_OVFL_CNT_V2 0x80 +#define RBUF_OVFL_CNT_V3PLUS 0x94 + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT_V1 0x634 +#define RBUF_ERR_CNT_V2 0x84 +#define RBUF_ERR_CNT_V3PLUS 0x98 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) 
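+/* RBUF_64B_EN prepends the 64-byte status_64 block (defined above) to each
+ * received frame and RBUF_ALIGN_2B inserts two leading pad bytes so that the
+ * IP header lands on a 4-byte boundary; the receive path accounts for both
+ * before a frame is passed on.
+ */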
+#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) +#define RBUF_SKIP_FCS (1 << 4) +#define RBUF_L3_PARSE_DIS (1 << 5) + +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) +#define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) +#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \ + UMAC_IRQ_MPD_R) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE + +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 << 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) 
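+/* The EXT_PWR_DOWN_* bits of EXT_EXT_PWR_MGMT are what bcmgenet_power_down()
+ * sets for GENET_POWER_PASSIVE (used in the suspend path); exactly which of
+ * them apply differs between GENET versions, v5 having the finer-grained
+ * EXT_PWR_DOWN_PHY_* controls.
+ */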
+#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_IDDQ_GLBL_PWR (1 << 7) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) +#define EXT_PWR_DOWN_PHY_TX (1 << 16) +#define EXT_PWR_DOWN_PHY_RX (1 << 17) +#define EXT_PWR_DOWN_PHY_SD (1 << 18) +#define EXT_PWR_DOWN_PHY_RD (1 << 19) +#define EXT_PWR_DOWN_PHY_EN (1 << 20) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x01FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ +#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define 
DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, + GENET_POWER_WOL_MAGIC, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4, + GENET_V5 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) +#define GENET_IS_V5(p) ((p)->version == GENET_V5) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR (1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 tx_bds_per_q; + u8 rx_queues; + u8 rx_bds_per_q; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 hfb_filter_size; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_skb_cb { + struct enet_cb *first_cb; /* First control block of SKB */ + struct enet_cb *last_cb; /* Last control block of SKB */ + unsigned int bytes_sent; /* bytes on the wire (no TSB) */ +}; + +#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb)) + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ + unsigned long packets; + unsigned long bytes; + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct dim dim; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned long bytes; + unsigned long packets; + unsigned long errors; + unsigned long dropped; + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + struct bcmgenet_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + void 
(*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; +}; + +enum bcmgenet_rxnfc_state { + BCMGENET_RXNFC_STATE_UNUSED = 0, + BCMGENET_RXNFC_STATE_DISABLED, + BCMGENET_RXNFC_STATE_ENABLED +}; + +struct bcmgenet_rxnfc_rule { + struct list_head list; + struct ethtool_rx_flow_spec fs; + enum bcmgenet_rxnfc_state state; +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + + /* transmit variables */ + void __iomem *tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES]; + struct list_head rxnfc_list; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + + /* MDIO bus variables */ + wait_queue_head_t wq; + bool internal_phy; + struct device_node *phy_dn; + struct device_node *mdio_dn; + struct mii_bus *mii_bus; + u16 gphy_rev; + struct clk *clk_eee; + bool clk_eee_enabled; + + /* PHY device variables */ + int old_link; + int old_speed; + int old_duplex; + int old_pause; + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + int wol_irq; + bool wol_irq_disabled; + + /* shared status */ + spinlock_t lock; + unsigned int irq0_stat; + + /* HW descriptors/checksum variables */ + bool crc_fwd_en; + + u32 dma_max_burst_length; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + struct platform_device *mii_pdev; + + /* WOL */ + struct clk *clk_wol; + u32 wolopts; + u8 sopass[SOPASS_MAX]; + bool wol_active; + + struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; + /* EtherCAT device variables */ + ec_device_t *ecdev; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + /* MIPS chips strapped for BE will automagically configure the \ + * peripheral registers for CPU-native byte order. 
\ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_probe(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); +void bcmgenet_mii_setup(struct net_device *dev); + +/* Wake-on-LAN routines */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + +#endif /* __BCMGENET_H__ */ diff --git a/devices/genet/bcmgenet-5.10-orig.c b/devices/genet/bcmgenet-5.10-orig.c new file mode 100644 index 00000000..7dcd5613 --- /dev/null +++ b/devices/genet/bcmgenet-5.10-orig.c @@ -0,0 +1,4296 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcmgenet.h" + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte 
order. + */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl(offset); +} + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. 
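+ * On GENET v1 the TBUF registers are reached through the RBUF block
+ * (TBUF_CTRL_V1, TBUF_BP_MC_V1); later versions address them via the
+ * per-chip tbuf_offset from bcmgenet_hw_params, as the accessors below show.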
+ */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, + DMA_RING0_TIMEOUT, + DMA_RING1_TIMEOUT, + DMA_RING2_TIMEOUT, + DMA_RING3_TIMEOUT, + DMA_RING4_TIMEOUT, + DMA_RING5_TIMEOUT, + DMA_RING6_TIMEOUT, + DMA_RING7_TIMEOUT, + DMA_RING8_TIMEOUT, + DMA_RING9_TIMEOUT, + DMA_RING10_TIMEOUT, + DMA_RING11_TIMEOUT, + DMA_RING12_TIMEOUT, + DMA_RING13_TIMEOUT, + DMA_RING14_TIMEOUT, + DMA_RING15_TIMEOUT, + DMA_RING16_TIMEOUT, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x2C, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + [DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + 
[DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { + TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. 
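+ * (Compare genet_dma_ring_regs_v4 with genet_dma_ring_regs_v123 below:
+ * only the v4 layout carries the *_HI words.)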
+ */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +} + +static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset, reg, reg1; + + offset = HFB_FLT_ENABLE_V3PLUS; + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + if (f_index < 32) { + reg1 &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); + } else { + reg &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + } + if (!reg && !reg1) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= 
((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_validate_mask(void *mask, size_t size) +{ + while (size) { + switch (*(unsigned char *)mask++) { + case 0x00: + case 0x0f: + case 0xf0: + case 0xff: + size--; + continue; + default: + return -EINVAL; + } + } + + return 0; +} + +#define VALIDATE_MASK(x) \ + bcmgenet_hfb_validate_mask(&(x), sizeof(x)) + +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) +{ + u32 index, tmp; + + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); + + while (size--) { + if (offset++ & 1) { + tmp &= ~0x300FF; + tmp |= (*(unsigned char *)val++); + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0x30000; + break; + case 0xF0: + tmp |= 0x20000; + break; + case 0x0F: + tmp |= 0x10000; + break; + } + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); + if (size) + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); + } else { + tmp &= ~0xCFF00; + tmp |= (*(unsigned char *)val++) << 8; + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0xC0000; + break; + case 0xF0: + tmp |= 0x80000; + break; + case 0x0F: + tmp |= 0x40000; + break; + } + if (!size) + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); + } + } + + return 0; +} + +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) +{ + struct ethtool_rx_flow_spec *fs = &rule->fs; + u32 offset = 0, f_length = 0, f; + u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; + size_t size; + + f = fs->location; + if (fs->flow_type & FLOW_MAC_EXT) { + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_ext.h_dest, &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if (fs->flow_type & FLOW_EXT) { + if (fs->m_ext.vlan_etype || + fs->m_ext.vlan_tci) { + bcmgenet_hfb_insert_data(priv, f, 12, + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmgenet_hfb_insert_data(priv, f, 14, + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + f_length += DIV_ROUND_UP(VLAN_HLEN, 2); + } + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN, 2); + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + break; + case IP_USER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2); + /* Specify IP Ether Type */ + val_16 = htons(ETH_P_IP); + mask_16 = 0xFFFF; + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmgenet_hfb_insert_data(priv, f, 15 + offset, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmgenet_hfb_insert_data(priv, f, 23 + offset, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmgenet_hfb_insert_data(priv, f, 26 + offset, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmgenet_hfb_insert_data(priv, 
f, 30 + offset, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if (!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, + &val_8, &mask_8, + sizeof(val_8)); + size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); + bcmgenet_hfb_insert_data(priv, f, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + size); + f_length += DIV_ROUND_UP(size, 2); + break; + } + + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); + if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { + /* Ring 0 flows can be handled by the default Descriptor Ring + * We'll map them to ring 0, but don't enable the filter + */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); + rule->state = BCMGENET_RXNFC_STATE_DISABLED; + } else { + /* Other Rx rings are direct mapped here */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); + rule->state = BCMGENET_RXNFC_STATE_ENABLED; + } +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. + */ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + int i; + + INIT_LIST_HEAD(&priv->rxnfc_list); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); + priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; + } + + bcmgenet_hfb_clear(priv); +} + +static int bcmgenet_begin(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn on the clock */ + return clk_prepare_enable(priv->clk); +} + +static void bcmgenet_complete(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn off the clock */ + clk_disable_unprepare(priv->clk); +} + +static int bcmgenet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + phy_ethtool_ksettings_get(dev->phydev, cmd); + + return 0; +} + +static int bcmgenet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(dev->phydev, cmd); +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + int 
ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +static int bcmgenet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; + + ec->tx_max_coalesced_frames = + bcmgenet_tdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_max_coalesced_frames = + bcmgenet_rdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_coalesce_usecs = + bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + + return 0; +} + +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +static int bcmgenet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int i; + + /* Base system clock is 125Mhz, DMA timeout is this reference clock + * divided by 1024, which yields roughly 8.192us, our maximum value + * has to fit in the DMA_TIMEOUT_MASK (16 bits) + */ + if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->tx_max_coalesced_frames == 0 || + ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) + return -EINVAL; + + /* GENET TDMA hardware does not support a configurable timeout, but will + * always generate an interrupt either after MBDONE packets have been + * transmitted, or when the ring is empty. 
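+ * The RX rings do take the microsecond value: bcmgenet_set_rx_coalesce()
+ * above converts rx_coalesce_usecs into ticks of the 8.192 us reference
+ * (125 MHz / 1024) via DIV_ROUND_UP(usecs * 1000, 8192).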
+ */ + + /* Program all TX queues with the same values, as there is no + * ethtool knob to do coalescing on a per-queue basis + */ + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tdma_ring_writel(priv, i, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + bcmgenet_tdma_ring_writel(priv, DESC_INDEX, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); + + return 0; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + +#define STAT_GENET_Q(num) \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ + tx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ + tx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ + rx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ + rx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ + rx_rings[num].errors), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \ + rx_rings[num].dropped) + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + 
STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT_V1), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, + UMAC_RBUF_ERR_CNT_V1), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("tx_realloc_tsb", 
mib.tx_realloc_tsb), + STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), + /* Per TX queues */ + STAT_GENET_Q(0), + STAT_GENET_Q(1), + STAT_GENET_Q(2), + STAT_GENET_Q(3), + STAT_GENET_Q(16), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset) +{ + u16 new_offset; + u32 val; + + switch (offset) { + case UMAC_RBUF_OVFL_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_OVFL_CNT_V2; + else + new_offset = RBUF_OVFL_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + case UMAC_RBUF_ERR_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_ERR_CNT_V2; + else + new_offset = RBUF_ERR_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + default: + val = bcmgenet_umac_readl(priv, offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, offset); + break; + } + + return val; +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: + continue; + case BCMGENET_STAT_RUNT: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_TX: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_RX: + val = bcmgenet_umac_readl(priv, + UMAC_MIB_START + j + offset); + offset = 0; /* Reset Offset */ + break; + case BCMGENET_STAT_MISC: + if (GENET_IS_V1(priv)) { + val = bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, + s->reg_offset); + } else { + val = bcmgenet_update_stat_misc(priv, + s->reg_offset); + } + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + dev->netdev_ops->ndo_get_stats(dev); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } +} + +static void bcmgenet_eee_enable_set(struct net_device 
*dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = bcmgenet_readl(priv->base + off); + if (enable) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + bcmgenet_writel(reg, priv->base + off); + + /* Do the same for thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + int ret = 0; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false); + } else { + ret = phy_init_eee(dev->phydev, 0); + if (ret) { + netif_err(priv, hw, dev, "EEE initialization failed\n"); + return ret; + } + + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, true); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static int bcmgenet_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) { + netdev_err(dev, "rxnfc: Invalid location (%d)\n", + cmd->fs.location); + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case IP_USER_FLOW: + l4_mask = &cmd->fs.m_u.usr_ip4_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(l4_mask->ip4src) || + VALIDATE_MASK(l4_mask->ip4dst) || + VALIDATE_MASK(l4_mask->l4_4_bytes) || + VALIDATE_MASK(l4_mask->proto) || + VALIDATE_MASK(l4_mask->ip_ver) || + VALIDATE_MASK(l4_mask->tos)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(eth_mask->h_dest) || + VALIDATE_MASK(eth_mask->h_source) || + VALIDATE_MASK(eth_mask->h_proto)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + default: + netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n", + cmd->fs.flow_type); + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || + 
VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { + netdev_err(dev, "rxnfc: user-def not supported\n"); + return -EINVAL; + } + } + + if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + } + + return 0; +} + +static int bcmgenet_insert_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *loc_rule; + int err; + + if (priv->hw_params->hfb_filter_size < 128) { + netdev_err(dev, "rxnfc: Not supported by this device\n"); + return -EINVAL; + } + + if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && + cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { + netdev_err(dev, "rxnfc: Unsupported action (%llu)\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + + err = bcmgenet_validate_flow(dev, cmd); + if (err) + return err; + + loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memcpy(&loc_rule->fs, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); + + list_add_tail(&loc_rule->list, &priv->rxnfc_list); + + return 0; +} + +static int bcmgenet_delete_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[cmd->fs.location]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { + err = -ENOENT; + goto out; + } + + if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); + +out: + return err; +} + +static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = bcmgenet_insert_flow(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = bcmgenet_delete_flow(dev, cmd); + break; + default: + netdev_warn(priv->dev, "Unsupported ethtool command. 
(%d)\n", + cmd->cmd); + return -EINVAL; + } + + return err; +} + +static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[loc]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) + err = -ENOENT; + else + memcpy(&cmd->fs, &rule->fs, + sizeof(struct ethtool_rx_flow_spec)); + + return err; +} + +static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) +{ + struct list_head *pos; + int res = 0; + + list_for_each(pos, &priv->rxnfc_list) + res++; + + return res; +} + +static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + int i = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->hw_params->rx_queues ?: 1; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmgenet_get_num_flows(priv); + cmd->data = MAX_NUM_OF_FS_RULES; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (i < cmd->rule_cnt) + rule_locs[i++] = rule->fs.location; + cmd->rule_cnt = i; + cmd->data = MAX_NUM_OF_FS_RULES; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/* standard ethtool support functions. */ +static const struct ethtool_ops bcmgenet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .begin = bcmgenet_begin, + .complete = bcmgenet_complete, + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, + .get_wol = bcmgenet_get_wol, + .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = phy_ethtool_nway_reset, + .get_coalesce = bcmgenet_get_coalesce, + .set_coalesce = bcmgenet_set_coalesce, + .get_link_ksettings = bcmgenet_get_link_ksettings, + .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, + .get_rxnfc = bcmgenet_get_rxnfc, + .set_rxnfc = bcmgenet_set_rxnfc, +}; + +/* Power down the unimac, based on mode. 
*/ +static int bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->dev->phydev); + break; + + case GENET_POWER_WOL_MAGIC: + ret = bcmgenet_wol_power_down_cfg(priv, mode); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + if (GENET_IS_V5(priv)) + reg |= EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR; + else + reg |= EXT_PWR_DOWN_PHY; + + reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); + } + break; + default: + break; + } + + return ret; +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); + if (GENET_IS_V5(priv)) { + reg &= ~(EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR); + reg |= EXT_PHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + mdelay(1); + + reg &= ~EXT_PHY_RESET; + } else { + reg &= ~EXT_PWR_DOWN_PHY; + reg |= EXT_PWR_DN_EN_LD; + } + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_phy_power_set(priv->dev, true); + break; + + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + if (!GENET_IS_V5(priv)) { + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + break; + case GENET_POWER_WOL_MAGIC: + bcmgenet_wol_power_up_cfg(priv, mode); + return; + default: + break; + } +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Rewinding local write pointer */ + if (ring->write_ptr == ring->cb_ptr) + ring->write_ptr = ring->end_ptr; + else + ring->write_ptr--; + + return tx_cb_ptr; +} + +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring 
*ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_SET); +} + +/* Simple helper to free a transmit control block's resources + * Returns an skb when the last transmit control block associated with the + * skb is freed. The skb should be freed by the caller if necessary. + */ +static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + + if (skb) { + cb->skb = NULL; + if (cb == GENET_CB(skb)->first_cb) + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + + if (cb == GENET_CB(skb)->last_cb) + return skb; + + } else if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_page(dev, + dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return NULL; +} + +/* Simple helper to free a receive control block's resources */ +static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + cb->skb = NULL; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return skb; +} + +/* Unlocked version of the reclaim routine */ +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int txbds_processed = 0; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + unsigned int txbds_ready; + unsigned int c_index; + struct sk_buff *skb; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_CLEAR); + else + bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + INTRL2_CPU_CLEAR); + + /* Compute how many buffers are transmitted since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) + & DMA_C_INDEX_MASK; + txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); + + /* Reclaim transmitted buffers */ + while (txbds_processed < txbds_ready) { + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, + &priv->tx_cbs[ring->clean_ptr]); + if (skb) { + pkts_compl++; + bytes_compl += GENET_CB(skb)->bytes_sent; + dev_consume_skb_any(skb); + } + + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; + } + + ring->free_bds += txbds_processed; + ring->c_index = c_index; + + ring->packets += pkts_compl; + ring->bytes += bytes_compl; + + 
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + pkts_compl, bytes_compl); + + return txbds_processed; +} + +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned int released; + + spin_lock_bh(&ring->lock); + released = __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_bh(&ring->lock); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + struct netdev_queue *txq; + + spin_lock(&ring->lock); + work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + netif_tx_wake_queue(txq); + } + spin_unlock(&ring->lock); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 ip_proto; + __be16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. + */ + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + if (!new_skb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; + dev->stats.tx_dropped++; + return NULL; + } + dev_consume_skb_any(skb); + skb = new_skb; + priv->mib.tx_realloc_tsb++; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = skb->protocol; + switch (ip_ver) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* don't use UDP flag */ + ip_proto = 0; + break; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset) | + STATUS_TX_CSUM_LV; + + /* Set the special UDP flag for UDP */ + if (ip_proto == IPPROTO_UDP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + + status->tx_csum_info = tx_csum_info; + } + + return skb; +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_tx_ring *ring = NULL; + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + int nr_frags, index; + dma_addr_t mapping; + unsigned int size; + skb_frag_t *frag; + u32 len_stat; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. (highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. 
+ * queue_mapping = 4, goes to ring 3. + */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + nr_frags = skb_shinfo(skb)->nr_frags; + + spin_lock(&ring->lock); + if (ring->free_bds <= (nr_frags + 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Retain how many bytes will be sent on the wire, without TSB inserted + * by transmit checksum offload + */ + GENET_CB(skb)->bytes_sent = skb->len; + + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + + for (i = 0; i <= nr_frags; i++) { + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + BUG_ON(!tx_cb_ptr); + + if (!i) { + /* Transmit single SKB or head of fragment list */ + GENET_CB(skb)->first_cb = tx_cb_ptr; + size = skb_headlen(skb); + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + /* xmit fragment */ + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + ret = NETDEV_TX_OK; + goto out_unmap_frags; + } + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, size); + + tx_cb_ptr->skb = skb; + + len_stat = (size << DMA_BUFLENGTH_SHIFT) | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ + if (!i) { + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; + } + if (i == nr_frags) + len_stat |= DMA_EOP; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); + } + + GENET_CB(skb)->last_cb = tx_cb_ptr; + skb_tx_timestamp(skb); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) + netif_tx_stop_queue(txq); + + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); +out: + spin_unlock(&ring->lock); + + return ret; + +out_unmap_frags: + /* Back up for failed control block mapping */ + bcmgenet_put_txcb(priv, ring); + + /* Unmap successfully mapped control blocks */ + while (i-- > 0) { + tx_cb_ptr = bcmgenet_put_txcb(priv, ring); + bcmgenet_free_tx_cb(kdev, tx_cb_ptr); + } + + dev_kfree_skb(skb); + goto out; +} + +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + struct sk_buff *rx_skb; + dma_addr_t mapping; + + /* Allocate a new Rx skb */ + skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } + + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, 
priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + dev_kfree_skb_any(skb); + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; + } + + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = bcmgenet_free_rx_cb(kdev, cb); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + + /* Return the current Rx skb to caller */ + return rx_skb; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, + unsigned int budget) +{ + struct bcmgenet_priv *priv = ring->priv; + struct net_device *dev = priv->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 0; + unsigned int p_index, mask; + unsigned int discards; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) { + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_CLEAR); + } else { + mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); + bcmgenet_intrl2_1_writel(priv, + mask, + INTRL2_CPU_CLEAR); + } + + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + ring->errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + + p_index &= DMA_P_INDEX_MASK; + rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + + cb = &priv->rx_cbs[ring->read_ptr]; + skb = bcmgenet_rx_refill(priv, cb); + + if (unlikely(!skb)) { + ring->dropped++; + goto next; + } + + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { + rx_csum = (__force __be16)(status->rx_csum & 0xffff); + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, p_index, ring->c_index, + ring->read_ptr, dma_length_status); + + if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { + netif_err(priv, rx_status, dev, + "dropping fragmented packet!\n"); + ring->errors++; + dev_kfree_skb_any(skb); + goto next; + } + + /* report errors */ + if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER))) { + netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", + (unsigned int)dma_flag); + if (dma_flag & 
DMA_RX_CRC_ERROR) + dev->stats.rx_crc_errors++; + if (dma_flag & DMA_RX_OV) + dev->stats.rx_over_errors++; + if (dma_flag & DMA_RX_NO) + dev->stats.rx_frame_errors++; + if (dma_flag & DMA_RX_LG) + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } /* error packet */ + + skb_put(skb, len); + + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + len -= 66; + + if (priv->crc_fwd_en) { + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + bytes_processed += len; + + /*Finish setting up the received SKB and send it to the kernel*/ + skb->protocol = eth_type_trans(skb, priv->dev); + ring->packets++; + ring->bytes += len; + if (dma_flag & DMA_RX_MULT) + dev->stats.multicast++; + + /* Notify kernel */ + napi_gro_receive(&ring->napi, skb); + netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +next: + rxpktprocessed++; + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); + } + + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + + return rxpktprocessed; +} + +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + struct dim_sample dim_sample = {}; + unsigned int work_done; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete_done(napi, work_done); + ring->int_enable(ring); + } + + if (ring->dim.use_dim) { + dim_update_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + + return work_done; +} + +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct dim_cq_moder cur_profile = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = DIM_START_MEASURE; +} + +/* Assign skb to RX DMA descriptor. 
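+ * Walks every control block in the ring and pre-loads it with a freshly allocated, DMA-mapped Rx skb via bcmgenet_rx_refill(); returns -ENOMEM if any buffer cannot be allocated.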
*/ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) +{ + struct enet_cb *cb; + struct sk_buff *skb; + int i; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_consume_skb_any(skb); + if (!cb->skb) + return -ENOMEM; + } + + return 0; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct sk_buff *skb; + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); + if (skb) + dev_consume_skb_any(skb); + } +} + +static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) +{ + u32 reg; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; + if (enable) + reg |= mask; + else + reg &= ~mask; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-size packet + * to be processed + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void reset_umac(struct bcmgenet_priv *priv) +{ + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + udelay(2); +} + +static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) +{ + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); +} + +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + +static void init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + u32 reg; + u32 int0_enable = 0; + + dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); + + reset_umac(priv); + + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, + UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx registers, enable ip header optimization and RSB */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if 
(priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + bcmgenet_intr_disable(priv); + + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) + int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + + dev_dbg(kdev, "done init umac\n"); +} + +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_rx_moderation(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +/* Initialize a Tx ring along with corresponding hardware registers */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + + spin_lock_init(&ring->lock); + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + start_ptr; + ring->size = size; + ring->clean_ptr = start_ptr; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_WRITE_PTR); + 
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + /* Initialize Tx NAPI */ + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) + return ret; + + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + + /* Initialize Rx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, + NAPI_POLL_WEIGHT); + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << + DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + return ret; +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx queues + * + * Queues 0-3 are priority-based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 
+ * + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] + */ +static void bcmgenet_init_tx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; + u32 dma_priority[3] = {0, 0, 0}; + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + /* Initialize Tx priority queues */ + for (i = 0; i < priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + } + + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. 
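+ * As on the Tx side, each priority queue is given rx_bds_per_q control blocks and queue 16 takes the remaining blocks up to TOTAL_DESC.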
+ */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Enable rings */ + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + u32 dma_ctrl; + int i; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + return ret; +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + struct netdev_queue *txq; + int i; + + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i)); + + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + + 
bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } + + /* Initialize common TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->tx_cbs) { + kfree(priv->rx_cbs); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } + + /* Init rDma */ + bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } + + /* Init tDma */ + bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); + + return 0; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ + unsigned int status; + struct bcmgenet_priv *priv = container_of( + work, struct bcmgenet_priv, bcmgenet_irq_work); + + netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + + spin_lock_irq(&priv->lock); + status = priv->irq0_stat; + priv->irq0_stat = 0; + spin_unlock_irq(&priv->lock); + + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + + /* Link UP/DOWN event */ + if (status & UMAC_IRQ_LINK_EVENT) + phy_mac_interrupt(priv->dev->phydev); + +} + +/* bcmgenet_isr1: handle Rx and Tx priority queues */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int index, status; + + /* Read irq status */ + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "%s: IRQ=0x%x\n", __func__, status); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < priv->hw_params->rx_queues; index++) { + if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(status & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); 
+ } + } + + return IRQ_HANDLED; +} + +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int status; + unsigned long flags; + + /* Read irq status */ + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "IRQ=0x%x\n", status); + + if (status & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + if (status & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + wake_up(&priv->wq); + } + + /* all other interested interrupts handled in bottom half */ + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); + if (status) { + /* Save irq status for bottom-half processing. */ + spin_lock_irqsave(&priv->lock, flags); + priv->irq0_stat |= status; + spin_unlock_irqrestore(&priv->lock, flags); + + schedule_work(&priv->bcmgenet_irq_work); + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) +{ + /* Acknowledge the interrupt */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); + bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); +} + +static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + u32 addr_tmp; + + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); + put_unaligned_be32(addr_tmp, &addr[0]); + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1); + put_unaligned_be16(addr_tmp, &addr[4]); +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + unsigned int i; + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + 
dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_netif_start(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ + bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); + + bcmgenet_enable_tx_napi(priv); + + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + + phy_start(dev->phydev); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + clk_prepare_enable(priv->clk); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + init_umac(priv); + + /* Apply features again in case we changed them while interface was + * down + */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + /* HFB init */ + bcmgenet_hfb_init(priv); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } + + bcmgenet_netif_start(dev); + + netif_tx_start_all_queues(dev); + + return 0; + +err_irq1: + free_irq(priv->irq1, priv); +err_irq0: + free_irq(priv->irq0, priv); +err_fini_dma: + bcmgenet_dma_teardown(priv); + bcmgenet_fini_dma(priv); +err_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static void bcmgenet_netif_stop(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + bcmgenet_disable_tx_napi(priv); + netif_tx_disable(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + phy_stop(dev->phydev); + bcmgenet_disable_rx_napi(priv); + bcmgenet_intr_disable(priv); + + /* Wait for pending work items to complete. 
Since interrupts are + * disabled no new work will be scheduled. + */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + bcmgenet_netif_stop(dev); + + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(dev->phydev); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock(&ring->lock); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock(&ring->lock); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? 
"enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + +static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + netif_trans_update(dev); + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MDF_FILTER 17 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + unsigned char *addr, + int *i) +{ + bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], + UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + *i += 2; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, nfilter; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Number of filters needed */ + nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; + + /* + * Turn on promicuous mode for three scenarios + * 1. IFF_PROMISC flag is set + * 2. IFF_ALLMULTI flag is set + * 3. The number of filters needed exceeds the number filters + * supported by the hardware. + */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || + (nfilter > MAX_MDF_FILTER)) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* update MDF filter */ + i = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); + + /* Unicast */ + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Multicast */ + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Enable filters */ + reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); +} + +/* Set the hardware MAC address. */ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. 
+ */ + if (netif_running(dev)) + return -EBUSY; + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + +static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long tx_bytes = 0, tx_packets = 0; + unsigned long rx_bytes = 0, rx_packets = 0; + unsigned long rx_errors = 0, rx_dropped = 0; + struct bcmgenet_tx_ring *tx_ring; + struct bcmgenet_rx_ring *rx_ring; + unsigned int q; + + for (q = 0; q < priv->hw_params->tx_queues; q++) { + tx_ring = &priv->tx_rings[q]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + } + tx_ring = &priv->tx_rings[DESC_INDEX]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + + for (q = 0; q < priv->hw_params->rx_queues; q++) { + rx_ring = &priv->rx_rings[q]; + + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + } + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_packets = rx_packets; + dev->stats.rx_errors = rx_errors; + dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; + return &dev->stats; +} + +static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || + priv->phy_interface != PHY_INTERFACE_MODE_MOCA) + return -EOPNOTSUPP; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif + .ndo_get_stats = bcmgenet_get_stats, + .ndo_change_carrier = bcmgenet_change_carrier, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + 
GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V4] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V5] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + u16 gphy_rev; + + if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 6) + major = 5; + else if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. + */ + gphy_rev = reg & 0xffff; + + if (GENET_IS_V5(priv)) { + /* The EPHY revision should come from the MDIO registers of + * the PHY not from GENET. 
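+		 * A non-zero value is therefore only warned about below and is
+		 * not stored in priv->gphy_rev on GENET_V5.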
+ */ + if (gphy_rev != 0) { + pr_warn("GENET is reporting EPHY revision: 0x%04x\n", + gphy_rev); + } + /* This is reserved so should require special treatment */ + } else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + /* This is the good old scheme, just GPHY major, no minor nor patch */ + } else if ((gphy_rev & 0xf0) != 0) { + priv->gphy_rev = gphy_rev << 8; + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + } else if ((gphy_rev & 0xff00) != 0) { + priv->gphy_rev = gphy_rev; + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +struct bcmgenet_plat_data { + enum bcmgenet_version version; + u32 dma_max_burst_length; +}; + +static const struct bcmgenet_plat_data v1_plat_data = { + .version = GENET_V1, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v2_plat_data = { + .version = GENET_V2, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v3_plat_data = { + .version = GENET_V3, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v4_plat_data = { + .version = GENET_V4, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v5_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data bcm2711_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = 0x08, +}; + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, + { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, + { .compatible = "brcm,genet-v3", .data = &v3_plat_data }, + { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, + { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, + { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcmgenet_match); + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; + const struct bcmgenet_plat_data *pdata; + struct bcmgenet_priv *priv; + struct net_device *dev; + unsigned int i; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + if (priv->irq0 < 0) { + err = priv->irq0; + goto err; + } + priv->irq1 = platform_get_irq(pdev, 1); + if (priv->irq1 < 0) { + err = priv->irq1; + goto err; + } + priv->wol_irq = platform_get_irq_optional(pdev, 2); + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if 
(IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + spin_lock_init(&priv->lock); + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + dev->watchdog_timeo = 2 * HZ; + dev->ethtool_ops = &bcmgenet_ethtool_ops; + dev->netdev_ops = &bcmgenet_netdev_ops; + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set default features */ + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = true; + if (priv->wol_irq > 0) { + err = devm_request_irq(&pdev->dev, priv->wol_irq, + bcmgenet_wol_isr, 0, dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + } + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + netdev_boot_setup_check(dev); + + priv->dev = dev; + priv->pdev = pdev; + + pdata = device_get_match_data(&pdev->dev); + if (pdata) { + priv->version = pdata->version; + priv->dma_max_burst_length = pdata->dma_max_burst_length; + } else { + priv->version = pd->genet_version; + priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; + } + + priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) { + dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); + err = PTR_ERR(priv->clk); + goto err; + } + + err = clk_prepare_enable(priv->clk); + if (err) + goto err; + + bcmgenet_set_hw_params(priv); + + err = -EIO; + if (priv->hw_params->flags & GENET_HAS_40BITS) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_clk_disable; + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); + err = PTR_ERR(priv->clk_wol); + goto err_clk_disable; + } + + priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); + err = PTR_ERR(priv->clk_eee); + goto err_clk_disable; + } + + /* If this is an internal GPHY, power it on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + if (pd && !IS_ERR_OR_NULL(pd->mac_address)) + ether_addr_copy(dev->dev_addr, pd->mac_address); + else + if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN)) + if (has_acpi_companion(&pdev->dev)) + bcmgenet_get_hw_addr(priv, dev->dev_addr); + + if (!is_valid_ether_addr(dev->dev_addr)) { + dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + eth_hw_addr_random(dev); + } + + reset_umac(priv); + + err = bcmgenet_mii_init(dev); + if (err) + goto err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + /* Set default coalescing 
parameters */ + for (i = 0; i < priv->hw_params->rx_queues; i++) + priv->rx_rings[i].rx_max_coalesced_frames = 1; + priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; + + /* libphy will determine the link state */ + netif_carrier_off(dev); + + /* Turn off the main clock, WOL clock is handled separately */ + clk_disable_unprepare(priv->clk); + + err = register_netdev(dev); + if (err) { + bcmgenet_mii_exit(dev); + goto err; + } + + return err; + +err_clk_disable: + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + +static void bcmgenet_shutdown(struct platform_device *pdev) +{ + bcmgenet_remove(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int bcmgenet_resume_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + u32 reg; + + if (!netif_running(dev)) + return 0; + + /* Turn on the clock */ + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + if (device_may_wakeup(d) && priv->wolopts) { + /* Account for Wake-on-LAN events and clear those events + * (Some devices need more time between enabling the clocks + * and the interrupt register reflecting the wake event so + * read the register twice) + */ + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + if (reg & UMAC_IRQ_WAKE_EVENT) + pm_wakeup_event(&priv->pdev->dev, 0); + } + + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); + + return 0; +} + +static int bcmgenet_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + unsigned long dma_ctrl; + int ret; + + if (!netif_running(dev)) + return 0; + + /* From WOL-enabled suspend, switch to regular clock */ + if (device_may_wakeup(d) && priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + bcmgenet_umac_reset(priv); + + init_umac(priv); + + phy_init_hw(dev->phydev); + + /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); + bcmgenet_mii_config(priv->dev, false); + + /* Restore enabled features */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto out_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + if (!device_may_wakeup(d)) + phy_resume(dev->phydev); + + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); + + bcmgenet_netif_start(dev); + + netif_device_attach(dev); + + return 0; + +out_clk_disable: + if 
(priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + + bcmgenet_netif_stop(dev); + + if (!device_may_wakeup(d)) + phy_suspend(dev->phydev); + + /* Disable filtering */ + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); + + return 0; +} + +static int bcmgenet_suspend_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + if (!netif_running(dev)) + return 0; + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + else if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + /* Let the framework handle resumption and leave the clocks on */ + if (ret) + return ret; + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + return 0; +} +#else +#define bcmgenet_suspend NULL +#define bcmgenet_suspend_noirq NULL +#define bcmgenet_resume NULL +#define bcmgenet_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops bcmgenet_pm_ops = { + .suspend = bcmgenet_suspend, + .suspend_noirq = bcmgenet_suspend_noirq, + .resume = bcmgenet_resume, + .resume_noirq = bcmgenet_resume_noirq, +}; + +static const struct acpi_device_id genet_acpi_match[] = { + { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, genet_acpi_match); + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .shutdown = bcmgenet_shutdown, + .driver = { + .name = "bcmgenet", + .of_match_table = bcmgenet_match, + .pm = &bcmgenet_pm_ops, + .acpi_match_table = genet_acpi_match, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); +MODULE_ALIAS("platform:bcmgenet"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: mdio-bcm-unimac"); diff --git a/devices/genet/bcmgenet-5.10-orig.h b/devices/genet/bcmgenet-5.10-orig.h new file mode 100644 index 00000000..f6ca01da --- /dev/null +++ b/devices/genet/bcmgenet-5.10-orig.h @@ -0,0 +1,759 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2020 Broadcom + */ + +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. 
configuration */ +#define MAX_NUM_OF_FS_RULES 16 +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. */ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0X7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 
ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; +}; + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F + +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define UMAC_SPEED_10 0 +#define UMAC_SPEED_100 1 +#define UMAC_SPEED_1000 2 +#define UMAC_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) + +#define UMAC_MAC0 0x00C +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 + +#define UMAC_MODE 0x44 +#define MODE_LINK_STATUS (1 << 5) + +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) + +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff + +#define UMAC_TX_FLUSH 0x334 + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT_V1 0x61C +#define RBUF_OVFL_CNT_V2 0x80 +#define RBUF_OVFL_CNT_V3PLUS 0x94 + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT_V1 0x634 +#define RBUF_ERR_CNT_V2 0x84 +#define RBUF_ERR_CNT_V3PLUS 0x98 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) +#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) 
+#define RBUF_SKIP_FCS (1 << 4) +#define RBUF_L3_PARSE_DIS (1 << 5) + +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) +#define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) +#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \ + UMAC_IRQ_MPD_R) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE + +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 << 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) +#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_IDDQ_GLBL_PWR (1 << 7) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) +#define EXT_PWR_DOWN_PHY_TX (1 << 16) +#define EXT_PWR_DOWN_PHY_RX (1 << 17) +#define 
EXT_PWR_DOWN_PHY_SD (1 << 18) +#define EXT_PWR_DOWN_PHY_RD (1 << 19) +#define EXT_PWR_DOWN_PHY_EN (1 << 20) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x01FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ +#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + 
struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, + GENET_POWER_WOL_MAGIC, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4, + GENET_V5 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) +#define GENET_IS_V5(p) ((p)->version == GENET_V5) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR (1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 tx_bds_per_q; + u8 rx_queues; + u8 rx_bds_per_q; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 hfb_filter_size; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_skb_cb { + struct enet_cb *first_cb; /* First control block of SKB */ + struct enet_cb *last_cb; /* Last control block of SKB */ + unsigned int bytes_sent; /* bytes on the wire (no TSB) */ +}; + +#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb)) + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ + unsigned long packets; + unsigned long bytes; + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct dim dim; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned long bytes; + unsigned long packets; + unsigned long errors; + unsigned long dropped; + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + struct bcmgenet_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + void (*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; +}; + +enum bcmgenet_rxnfc_state { + BCMGENET_RXNFC_STATE_UNUSED = 0, + BCMGENET_RXNFC_STATE_DISABLED, + 
BCMGENET_RXNFC_STATE_ENABLED +}; + +struct bcmgenet_rxnfc_rule { + struct list_head list; + struct ethtool_rx_flow_spec fs; + enum bcmgenet_rxnfc_state state; +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + + /* transmit variables */ + void __iomem *tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES]; + struct list_head rxnfc_list; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + + /* MDIO bus variables */ + wait_queue_head_t wq; + bool internal_phy; + struct device_node *phy_dn; + struct device_node *mdio_dn; + struct mii_bus *mii_bus; + u16 gphy_rev; + struct clk *clk_eee; + bool clk_eee_enabled; + + /* PHY device variables */ + int old_link; + int old_speed; + int old_duplex; + int old_pause; + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + int wol_irq; + bool wol_irq_disabled; + + /* shared status */ + spinlock_t lock; + unsigned int irq0_stat; + + /* HW descriptors/checksum variables */ + bool crc_fwd_en; + + u32 dma_max_burst_length; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + struct platform_device *mii_pdev; + + /* WOL */ + struct clk *clk_wol; + u32 wolopts; + u8 sopass[SOPASS_MAX]; + bool wol_active; + + struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + /* MIPS chips strapped for BE will automagically configure the \ + * peripheral registers for CPU-native byte order. 
\ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_probe(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); +void bcmgenet_mii_setup(struct net_device *dev); + +/* Wake-on-LAN routines */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + +#endif /* __BCMGENET_H__ */ diff --git a/devices/genet/bcmgenet-5.14-ethercat.c b/devices/genet/bcmgenet-5.14-ethercat.c new file mode 100644 index 00000000..7ea02294 --- /dev/null +++ b/devices/genet/bcmgenet-5.14-ethercat.c @@ -0,0 +1,4454 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcmgenet-5.14-ethercat.h" + +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct 
net_device *dev); + +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. + */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel_relaxed(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl_relaxed(offset); +} + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. 
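+ * On GENET_V1 the TBUF registers live in the RBUF block, so the helpers
+ * below fall back to the *_V1 offsets in that case.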
+ */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, + DMA_RING0_TIMEOUT, + DMA_RING1_TIMEOUT, + DMA_RING2_TIMEOUT, + DMA_RING3_TIMEOUT, + DMA_RING4_TIMEOUT, + DMA_RING5_TIMEOUT, + DMA_RING6_TIMEOUT, + DMA_RING7_TIMEOUT, + DMA_RING8_TIMEOUT, + DMA_RING9_TIMEOUT, + DMA_RING10_TIMEOUT, + DMA_RING11_TIMEOUT, + DMA_RING12_TIMEOUT, + DMA_RING13_TIMEOUT, + DMA_RING14_TIMEOUT, + DMA_RING15_TIMEOUT, + DMA_RING16_TIMEOUT, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x2C, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + [DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + 
[DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { + TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. 
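+ * genet_dma_ring_regs_v123 below consequently has no *_HI entries and
+ * packs the remaining ring registers more tightly.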
+ */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +} + +static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset, reg, reg1; + + offset = HFB_FLT_ENABLE_V3PLUS; + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + if (f_index < 32) { + reg1 &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); + } else { + reg &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + } + if (!reg && !reg1) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= 
((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_validate_mask(void *mask, size_t size) +{ + while (size) { + switch (*(unsigned char *)mask++) { + case 0x00: + case 0x0f: + case 0xf0: + case 0xff: + size--; + continue; + default: + return -EINVAL; + } + } + + return 0; +} + +#define VALIDATE_MASK(x) \ + bcmgenet_hfb_validate_mask(&(x), sizeof(x)) + +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) +{ + u32 index, tmp; + + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); + + while (size--) { + if (offset++ & 1) { + tmp &= ~0x300FF; + tmp |= (*(unsigned char *)val++); + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0x30000; + break; + case 0xF0: + tmp |= 0x20000; + break; + case 0x0F: + tmp |= 0x10000; + break; + } + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); + if (size) + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); + } else { + tmp &= ~0xCFF00; + tmp |= (*(unsigned char *)val++) << 8; + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0xC0000; + break; + case 0xF0: + tmp |= 0x80000; + break; + case 0x0F: + tmp |= 0x40000; + break; + } + if (!size) + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); + } + } + + return 0; +} + +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) +{ + struct ethtool_rx_flow_spec *fs = &rule->fs; + u32 offset = 0, f_length = 0, f; + u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; + size_t size; + + f = fs->location; + if (fs->flow_type & FLOW_MAC_EXT) { + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_ext.h_dest, &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if (fs->flow_type & FLOW_EXT) { + if (fs->m_ext.vlan_etype || + fs->m_ext.vlan_tci) { + bcmgenet_hfb_insert_data(priv, f, 12, + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmgenet_hfb_insert_data(priv, f, 14, + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + f_length += DIV_ROUND_UP(VLAN_HLEN, 2); + } + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN, 2); + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + break; + case IP_USER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2); + /* Specify IP Ether Type */ + val_16 = htons(ETH_P_IP); + mask_16 = 0xFFFF; + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmgenet_hfb_insert_data(priv, f, 15 + offset, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmgenet_hfb_insert_data(priv, f, 23 + offset, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmgenet_hfb_insert_data(priv, f, 26 + offset, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmgenet_hfb_insert_data(priv, 
f, 30 + offset, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if (!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, + &val_8, &mask_8, + sizeof(val_8)); + size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); + bcmgenet_hfb_insert_data(priv, f, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + size); + f_length += DIV_ROUND_UP(size, 2); + break; + } + + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); + if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { + /* Ring 0 flows can be handled by the default Descriptor Ring + * We'll map them to ring 0, but don't enable the filter + */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); + rule->state = BCMGENET_RXNFC_STATE_DISABLED; + } else { + /* Other Rx rings are direct mapped here */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); + rule->state = BCMGENET_RXNFC_STATE_ENABLED; + } +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. + */ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + int i; + + INIT_LIST_HEAD(&priv->rxnfc_list); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); + priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; + } + + bcmgenet_hfb_clear(priv); +} + +static int bcmgenet_begin(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn on the clock */ + return clk_prepare_enable(priv->clk); +} + +static void bcmgenet_complete(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn off the clock */ + clk_disable_unprepare(priv->clk); +} + +static int bcmgenet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + phy_ethtool_ksettings_get(dev->phydev, cmd); + + return 0; +} + +static int bcmgenet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(dev->phydev, cmd); +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + int 
ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +static int bcmgenet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; + + ec->tx_max_coalesced_frames = + bcmgenet_tdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_max_coalesced_frames = + bcmgenet_rdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_coalesce_usecs = + bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + + return 0; +} + +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +static int bcmgenet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int i; + + /* Base system clock is 125Mhz, DMA timeout is this reference clock + * divided by 1024, which yields roughly 8.192us, our maximum value + * has to fit in the DMA_TIMEOUT_MASK (16 bits) + */ + if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->tx_max_coalesced_frames == 0 || + ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) + return -EINVAL; + + /* GENET TDMA hardware does not support a configurable timeout, but will + * always generate an interrupt either after MBDONE packets have been + * transmitted, or when the ring is empty. 
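A minimal, self-contained sketch of the microsecond-to-timeout-register conversion used by bcmgenet_set_rx_coalesce() and read back in bcmgenet_get_coalesce() above, assuming the 8.192 us tick (125 MHz reference divided by 1024) and the 16-bit timeout field described in the comment. The sketch_* names and the clamp are illustrative only; the driver itself rejects out-of-range values in bcmgenet_set_coalesce() instead of clamping.

#include <stdio.h>

#define SKETCH_TICK_NS		8192u	/* one DMA timeout tick, in nanoseconds */
#define SKETCH_TIMEOUT_MASK	0xffffu	/* 16-bit timeout field (DMA_TIMEOUT_MASK) */

static unsigned int sketch_usecs_to_ticks(unsigned int usecs)
{
	/* same math as DIV_ROUND_UP(usecs * 1000, 8192) in bcmgenet_set_rx_coalesce() */
	unsigned int ticks = (usecs * 1000u + SKETCH_TICK_NS - 1u) / SKETCH_TICK_NS;

	/* illustrative clamp; the driver returns -EINVAL for out-of-range input */
	return ticks > SKETCH_TIMEOUT_MASK ? SKETCH_TIMEOUT_MASK : ticks;
}

static unsigned int sketch_ticks_to_usecs(unsigned int ticks)
{
	/* reverse direction, as in bcmgenet_get_coalesce() */
	return ticks * SKETCH_TICK_NS / 1000u;
}

int main(void)
{
	unsigned int ticks = sketch_usecs_to_ticks(50u);

	printf("50 us -> %u ticks -> reads back as %u us\n",
	       ticks, sketch_ticks_to_usecs(ticks));	/* 7 ticks, 57 us */
	return 0;
}

Because the value is rounded up to whole 8.192 us ticks, a requested 50 us interrupt coalescing interval reads back as 57 us.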
+ */ + + /* Program all TX queues with the same values, as there is no + * ethtool knob to do coalescing on a per-queue basis + */ + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tdma_ring_writel(priv, i, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + bcmgenet_tdma_ring_writel(priv, DESC_INDEX, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); + + return 0; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + +#define STAT_GENET_Q(num) \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ + tx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ + tx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ + rx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ + rx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ + rx_rings[num].errors), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \ + rx_rings[num].dropped) + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + 
STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT_V1), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, + UMAC_RBUF_ERR_CNT_V1), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("tx_realloc_tsb", 
mib.tx_realloc_tsb), + STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), + /* Per TX queues */ + STAT_GENET_Q(0), + STAT_GENET_Q(1), + STAT_GENET_Q(2), + STAT_GENET_Q(3), + STAT_GENET_Q(16), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset) +{ + u16 new_offset; + u32 val; + + switch (offset) { + case UMAC_RBUF_OVFL_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_OVFL_CNT_V2; + else + new_offset = RBUF_OVFL_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + case UMAC_RBUF_ERR_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_ERR_CNT_V2; + else + new_offset = RBUF_ERR_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + default: + val = bcmgenet_umac_readl(priv, offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, offset); + break; + } + + return val; +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: + continue; + case BCMGENET_STAT_RUNT: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_TX: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_RX: + val = bcmgenet_umac_readl(priv, + UMAC_MIB_START + j + offset); + offset = 0; /* Reset Offset */ + break; + case BCMGENET_STAT_MISC: + if (GENET_IS_V1(priv)) { + val = bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, + s->reg_offset); + } else { + val = bcmgenet_update_stat_misc(priv, + s->reg_offset); + } + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + dev->netdev_ops->ndo_get_stats(dev); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } +} + +static void bcmgenet_eee_enable_set(struct net_device 
*dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = bcmgenet_readl(priv->base + off); + if (enable) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + bcmgenet_writel(reg, priv->base + off); + + /* Do the same for thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + int ret = 0; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false); + } else { + ret = phy_init_eee(dev->phydev, 0); + if (ret) { + netif_err(priv, hw, dev, "EEE initialization failed\n"); + return ret; + } + + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, true); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static int bcmgenet_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) { + netdev_err(dev, "rxnfc: Invalid location (%d)\n", + cmd->fs.location); + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case IP_USER_FLOW: + l4_mask = &cmd->fs.m_u.usr_ip4_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(l4_mask->ip4src) || + VALIDATE_MASK(l4_mask->ip4dst) || + VALIDATE_MASK(l4_mask->l4_4_bytes) || + VALIDATE_MASK(l4_mask->proto) || + VALIDATE_MASK(l4_mask->ip_ver) || + VALIDATE_MASK(l4_mask->tos)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(eth_mask->h_dest) || + VALIDATE_MASK(eth_mask->h_source) || + VALIDATE_MASK(eth_mask->h_proto)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + default: + netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n", + cmd->fs.flow_type); + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || + 
VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { + netdev_err(dev, "rxnfc: user-def not supported\n"); + return -EINVAL; + } + } + + if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + } + + return 0; +} + +static int bcmgenet_insert_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *loc_rule; + int err; + + if (priv->hw_params->hfb_filter_size < 128) { + netdev_err(dev, "rxnfc: Not supported by this device\n"); + return -EINVAL; + } + + if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && + cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { + netdev_err(dev, "rxnfc: Unsupported action (%llu)\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + + err = bcmgenet_validate_flow(dev, cmd); + if (err) + return err; + + loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memcpy(&loc_rule->fs, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); + + list_add_tail(&loc_rule->list, &priv->rxnfc_list); + + return 0; +} + +static int bcmgenet_delete_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[cmd->fs.location]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { + err = -ENOENT; + goto out; + } + + if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); + +out: + return err; +} + +static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = bcmgenet_insert_flow(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = bcmgenet_delete_flow(dev, cmd); + break; + default: + netdev_warn(priv->dev, "Unsupported ethtool command. 
(%d)\n", + cmd->cmd); + return -EINVAL; + } + + return err; +} + +static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[loc]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) + err = -ENOENT; + else + memcpy(&cmd->fs, &rule->fs, + sizeof(struct ethtool_rx_flow_spec)); + + return err; +} + +static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) +{ + struct list_head *pos; + int res = 0; + + list_for_each(pos, &priv->rxnfc_list) + res++; + + return res; +} + +static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + int i = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->hw_params->rx_queues ?: 1; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmgenet_get_num_flows(priv); + cmd->data = MAX_NUM_OF_FS_RULES; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (i < cmd->rule_cnt) + rule_locs[i++] = rule->fs.location; + cmd->rule_cnt = i; + cmd->data = MAX_NUM_OF_FS_RULES; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/* standard ethtool support functions. */ +static const struct ethtool_ops bcmgenet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .begin = bcmgenet_begin, + .complete = bcmgenet_complete, + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, + .get_wol = bcmgenet_get_wol, + .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = phy_ethtool_nway_reset, + .get_coalesce = bcmgenet_get_coalesce, + .set_coalesce = bcmgenet_set_coalesce, + .get_link_ksettings = bcmgenet_get_link_ksettings, + .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, + .get_rxnfc = bcmgenet_get_rxnfc, + .set_rxnfc = bcmgenet_set_rxnfc, +}; + +/* Power down the unimac, based on mode. 
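The rxnfc path above only accepts masks whose bytes are 0x00, 0x0f, 0xf0 or 0xff, because the hardware filter block matches per 4-bit nibble (see how bcmgenet_hfb_insert_data() encodes two valid-nibble bits per byte). Below is a minimal user-space copy of that check, mirroring bcmgenet_hfb_validate_mask(); the sketch_* names and the test data are hypothetical.

#include <stdio.h>
#include <stddef.h>

static int sketch_validate_mask(const unsigned char *mask, size_t size)
{
	while (size--) {
		switch (*mask++) {
		case 0x00:
		case 0x0f:
		case 0xf0:
		case 0xff:
			continue;	/* nibble-aligned, accepted */
		default:
			return -1;	/* the driver returns -EINVAL here */
		}
	}
	return 0;
}

int main(void)
{
	unsigned char ok[]  = { 0xff, 0xff, 0xf0, 0x00 };	/* nibble-aligned */
	unsigned char bad[] = { 0xff, 0x3f };			/* 0x3f is rejected */

	printf("ok:  %d\n", sketch_validate_mask(ok, sizeof(ok)));	/* 0  */
	printf("bad: %d\n", sketch_validate_mask(bad, sizeof(bad)));	/* -1 */
	return 0;
}

In practice this is why an ethtool flow rule for this device must use nibble-aligned masks; anything finer-grained is refused with "Unsupported mask".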
*/ +static int bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->dev->phydev); + break; + + case GENET_POWER_WOL_MAGIC: + ret = bcmgenet_wol_power_down_cfg(priv, mode); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + if (GENET_IS_V5(priv)) + reg |= EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR; + else + reg |= EXT_PWR_DOWN_PHY; + + reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); + } + break; + default: + break; + } + + return ret; +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); + if (GENET_IS_V5(priv)) { + reg &= ~(EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR); + reg |= EXT_PHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + mdelay(1); + + reg &= ~EXT_PHY_RESET; + } else { + reg &= ~EXT_PWR_DOWN_PHY; + reg |= EXT_PWR_DN_EN_LD; + } + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_phy_power_set(priv->dev, true); + break; + + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + if (!GENET_IS_V5(priv)) { + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + break; + case GENET_POWER_WOL_MAGIC: + bcmgenet_wol_power_up_cfg(priv, mode); + return; + default: + break; + } +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Rewinding local write pointer */ + if (ring->write_ptr == ring->cb_ptr) + ring->write_ptr = ring->end_ptr; + else + ring->write_ptr--; + + return tx_cb_ptr; +} + +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring 
*ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_SET); +} + +/* Simple helper to free a transmit control block's resources + * Returns an skb when the last transmit control block associated with the + * skb is freed. The skb should be freed by the caller if necessary. + */ +static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + + if (skb) { + cb->skb = NULL; + if (cb == GENET_CB(skb)->first_cb) + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + + if (cb == GENET_CB(skb)->last_cb) + return skb; + + } else if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_page(dev, + dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return NULL; +} + +/* Simple helper to free a receive control block's resources */ +static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + cb->skb = NULL; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return skb; +} + +/* Unlocked version of the reclaim routine */ +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int txbds_processed = 0; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + unsigned int txbds_ready; + unsigned int c_index; + struct sk_buff *skb; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_CLEAR); + else + bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + INTRL2_CPU_CLEAR); + + /* Compute how many buffers are transmitted since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) + & DMA_C_INDEX_MASK; + txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); + + /* Reclaim transmitted buffers */ + while (txbds_processed < txbds_ready) { + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, + &priv->tx_cbs[ring->clean_ptr]); + if (skb) { + pkts_compl++; + bytes_compl += GENET_CB(skb)->bytes_sent; + if (!priv->ecdev) + dev_consume_skb_any(skb); + } + + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; + } + + ring->free_bds += txbds_processed; + ring->c_index = c_index; + + ring->packets += pkts_compl; + ring->bytes += bytes_compl; + + if (!priv->ecdev) + 
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + pkts_compl, bytes_compl); + + return txbds_processed; +} + +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned int released; + + spin_lock_bh(&ring->lock); + released = __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_bh(&ring->lock); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + struct netdev_queue *txq; + + if (ring->priv->ecdev) + return 0; + + spin_lock(&ring->lock); + work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + netif_tx_wake_queue(txq); + } + spin_unlock(&ring->lock); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 ip_proto; + __be16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. + */ + BUG_ON(priv->ecdev); + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + if (!new_skb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; + dev->stats.tx_dropped++; + return NULL; + } + dev_consume_skb_any(skb); + skb = new_skb; + priv->mib.tx_realloc_tsb++; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = skb->protocol; + switch (ip_ver) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* don't use UDP flag */ + ip_proto = 0; + break; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset) | + STATUS_TX_CSUM_LV; + + /* Set the special UDP flag for UDP */ + if (ip_proto == IPPROTO_UDP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + + status->tx_csum_info = tx_csum_info; + } + + return skb; +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_tx_ring *ring = NULL; + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + int nr_frags, index; + dma_addr_t mapping; + unsigned int size; + skb_frag_t *frag; + u32 len_stat; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. 
(highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. + * queue_mapping = 4, goes to ring 3. + */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + nr_frags = skb_shinfo(skb)->nr_frags; + + spin_lock(&ring->lock); + if (ring->free_bds <= (nr_frags + 1)) { + if (!priv->ecdev && !netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Retain how many bytes will be sent on the wire, without TSB inserted + * by transmit checksum offload + */ + GENET_CB(skb)->bytes_sent = skb->len; + + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + + for (i = 0; i <= nr_frags; i++) { + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + BUG_ON(!tx_cb_ptr); + + if (!i) { + /* Transmit single SKB or head of fragment list */ + GENET_CB(skb)->first_cb = tx_cb_ptr; + size = skb_headlen(skb); + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + /* xmit fragment */ + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + ret = NETDEV_TX_OK; + goto out_unmap_frags; + } + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, size); + + tx_cb_ptr->skb = skb; + + len_stat = (size << DMA_BUFLENGTH_SHIFT) | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ + if (!i) { + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; + } + if (i == nr_frags) + len_stat |= DMA_EOP; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); + } + + GENET_CB(skb)->last_cb = tx_cb_ptr; + if (priv->ecdev) { + /* give back headroom for next ethercat invocation */ + __skb_pull(skb, sizeof(struct status_64)); + } + skb_tx_timestamp(skb); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + if (!priv->ecdev) { + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) + netif_tx_stop_queue(txq); + } + + if (priv->ecdev || !netdev_xmit_more() || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); +out: + spin_unlock(&ring->lock); + + return ret; + +out_unmap_frags: + /* Back up for failed control block mapping */ + bcmgenet_put_txcb(priv, ring); + + /* Unmap successfully mapped control blocks */ + while (i-- > 0) { + tx_cb_ptr = bcmgenet_put_txcb(priv, ring); + bcmgenet_free_tx_cb(kdev, tx_cb_ptr); + } + + if (!priv->ecdev) + dev_kfree_skb(skb); + goto out; +} + +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + struct sk_buff *rx_skb; + dma_addr_t mapping; + + /* Allocate a new Rx skb */ + skb = 
__netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } + + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + dev_kfree_skb_any(skb); + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; + } + + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = bcmgenet_free_rx_cb(kdev, cb); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + + /* Return the current Rx skb to caller */ + return rx_skb; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, + unsigned int budget) +{ + struct bcmgenet_priv *priv = ring->priv; + struct net_device *dev = priv->dev; + struct device *kdev = &priv->pdev->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 0; + unsigned int p_index, mask; + unsigned int discards; + + if (!priv->ecdev) { + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) { + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_CLEAR); + } else { + mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); + bcmgenet_intrl2_1_writel(priv, + mask, + INTRL2_CPU_CLEAR); + } + } + + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + ring->errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + + p_index &= DMA_P_INDEX_MASK; + rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + + cb = &priv->rx_cbs[ring->read_ptr]; + if (priv->ecdev) + /* DMA unmap current skb */ + skb = bcmgenet_free_rx_cb(kdev, cb); + else + skb = bcmgenet_rx_refill(priv, cb); + + if (unlikely(!skb)) { + ring->dropped++; + goto next; + } + + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { + rx_csum = (__force __be16)(status->rx_csum & 0xffff); + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, p_index, ring->c_index, + 
ring->read_ptr, dma_length_status); + + if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { + netif_err(priv, rx_status, dev, + "dropping fragmented packet!\n"); + ring->errors++; + if (!priv->ecdev) + dev_kfree_skb_any(skb); + goto next; + } + + /* report errors */ + if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER))) { + netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", + (unsigned int)dma_flag); + if (dma_flag & DMA_RX_CRC_ERROR) + dev->stats.rx_crc_errors++; + if (dma_flag & DMA_RX_OV) + dev->stats.rx_over_errors++; + if (dma_flag & DMA_RX_NO) + dev->stats.rx_frame_errors++; + if (dma_flag & DMA_RX_LG) + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (!priv->ecdev) + dev_kfree_skb_any(skb); + goto next; + } /* error packet */ + + if (!priv->ecdev) { + skb_put(skb, len); + + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + } + len -= 66; + + if (priv->crc_fwd_en) { + if (!priv->ecdev) + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + bytes_processed += len; + + /*Finish setting up the received SKB and send it to the kernel*/ + skb->protocol = eth_type_trans(skb, priv->dev); + ring->packets++; + ring->bytes += len; + if (dma_flag & DMA_RX_MULT) + dev->stats.multicast++; + + if (priv->ecdev) { + dma_addr_t mapping; + /* skip status block and padding in skb */ + const unsigned char *data = skb->data + 66; + + ecdev_receive(priv->ecdev, data, len); + + /* remap skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + dev_kfree_skb_any(skb); + } else { + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + } + } else { + /* Notify kernel */ + napi_gro_receive(&ring->napi, skb); + } + netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +next: + rxpktprocessed++; + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); + } + + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + + return rxpktprocessed; +} + +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + struct dim_sample dim_sample = {}; + unsigned int work_done; + + if (ring->priv->ecdev) + return 0; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete_done(napi, work_done); + ring->int_enable(ring); + } + + if (ring->dim.use_dim) { + dim_update_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + + return work_done; +} + + +/** +* ec_poll - EtherCAT poll routine +* @netdev: net device structure +* +* This function can never fail. 
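A runnable model of the polling semantics implemented by ec_poll() below: the EtherCAT master calls it cyclically, it first reclaims completed Tx buffers and then gives every Rx ring a fixed budget (64 in the driver). Unlike the NAPI path there is no rescheduling; frames beyond the budget simply wait for the next cycle. The sketch_* names and the backlog figure are made up for the demo.

#include <stdio.h>

#define SKETCH_BUDGET 64	/* per-ring budget per poll, as in ec_poll() */

static unsigned int sketch_poll_ring(unsigned int *backlog, unsigned int budget)
{
	/* process at most 'budget' pending frames, leave the rest queued */
	unsigned int done = (*backlog < budget) ? *backlog : budget;

	*backlog -= done;
	return done;
}

int main(void)
{
	unsigned int backlog = 150;	/* pretend 150 frames are pending on one ring */
	unsigned int cycle = 0;

	while (backlog) {
		unsigned int done = sketch_poll_ring(&backlog, SKETCH_BUDGET);

		printf("cycle %u: processed %u, %u left\n", ++cycle, done, backlog);
	}
	return 0;	/* three cycles: 64 + 64 + 22 */
}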
+* +**/ +static void ec_poll(struct net_device *netdev) +{ + struct bcmgenet_priv *priv = netdev_priv(netdev); + unsigned i; + int budget = 64; + + bcmgenet_tx_reclaim_all(netdev); + for (i = 0; i < priv->hw_params->rx_queues; i++) { + bcmgenet_desc_rx(&priv->rx_rings[i], budget); + } + + bcmgenet_desc_rx(&priv->rx_rings[DESC_INDEX], budget); +} + +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct dim_cq_moder cur_profile = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = DIM_START_MEASURE; +} + +/* Assign skb to RX DMA descriptor. */ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) +{ + struct enet_cb *cb; + struct sk_buff *skb; + int i; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_consume_skb_any(skb); + if (!cb->skb) + return -ENOMEM; + } + + return 0; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct sk_buff *skb; + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); + if (skb) + dev_consume_skb_any(skb); + } +} + +static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) +{ + u32 reg; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; + if (enable) + reg |= mask; + else + reg &= ~mask; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-size packet + * to be processed + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void reset_umac(struct bcmgenet_priv *priv) +{ + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + udelay(2); +} + +static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) +{ + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); +} + +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + +static void init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + u32 reg; + u32 int0_enable = 0; + + dev_dbg(&priv->pdev->dev, 
"bcmgenet: init_umac\n"); + + reset_umac(priv); + + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, + UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx registers, enable ip header optimization and RSB */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if (priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + bcmgenet_intr_disable(priv); + + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) { + int0_enable |= UMAC_IRQ_MDIO_ERROR; + if (!priv->ecdev) + int0_enable |= UMAC_IRQ_MDIO_DONE; + } + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + + dev_dbg(kdev, "done init umac\n"); +} + +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ring->priv->ecdev) + return; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_rx_moderation(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +/* Initialize a Tx ring along with corresponding hardware registers */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + + spin_lock_init(&ring->lock); + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + start_ptr; + ring->size = size; + ring->clean_ptr = start_ptr; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = 
start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_WRITE_PTR); + bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + if (priv->ecdev) + return; + /* Initialize Tx NAPI */ + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) + return ret; + + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + + if (!priv->ecdev) { + /* Initialize Rx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, + NAPI_POLL_WEIGHT); + } + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << + DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + return ret; +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if 
(priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx queues + * + * Queues 0-3 are priority-based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. + * + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] + */ +static void bcmgenet_init_tx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; + u32 dma_priority[3] = {0, 0, 0}; + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + /* Initialize Tx priority queues */ + for (i = 0; i < priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + } + + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); + } + + ring = &priv->rx_rings[DESC_INDEX]; 
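The partitioning spelled out in the bcmgenet_init_tx_queues() comment above follows directly from tx_bds_per_q = 32, four priority queues and TOTAL_DESC = 256. The following stand-alone sketch (plain user-space C, not part of the patch) reproduces that arithmetic and prints the same tx_cbs[] ranges listed in the comment:

#include <stdio.h>

int main(void)
{
	const unsigned int tx_queues = 4;      /* priority queues 0..3 */
	const unsigned int tx_bds_per_q = 32;  /* descriptors per priority queue */
	const unsigned int total_desc = 256;   /* TOTAL_DESC */
	unsigned int q;

	for (q = 0; q < tx_queues; q++)
		printf("Tx queue %u uses tx_cbs[%u..%u]\n",
		       q, q * tx_bds_per_q, (q + 1) * tx_bds_per_q - 1);

	/* default queue 16 takes the remaining 256 - 4 * 32 = 128 descriptors */
	printf("Tx queue 16 uses tx_cbs[%u..%u]\n",
	       tx_queues * tx_bds_per_q, total_desc - 1);
	return 0;
}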
+ napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. + */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Enable rings */ + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + u32 dma_ctrl; + int i; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= 
~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + return ret; +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + struct netdev_queue *txq; + int i; + + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i)); + + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } + + /* Initialize common TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->tx_cbs) { + kfree(priv->rx_cbs); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } + + /* Init rDma */ + bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } + + /* Init tDma */ + bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); + + return 0; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ + unsigned int status; + struct bcmgenet_priv *priv = container_of( + work, struct bcmgenet_priv, bcmgenet_irq_work); + + netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + + spin_lock_irq(&priv->lock); + status = priv->irq0_stat; + priv->irq0_stat = 0; + spin_unlock_irq(&priv->lock); + + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + + /* Link UP/DOWN event */ + if (status & UMAC_IRQ_LINK_EVENT) + phy_mac_interrupt(priv->dev->phydev); + +} + +/* bcmgenet_isr1: handle Rx and Tx priority queues */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int index, status; + + /* Read irq status */ + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "%s: IRQ=0x%x\n", __func__, status); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < 
priv->hw_params->rx_queues; index++) { + if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(status & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + return IRQ_HANDLED; +} + +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int status; + unsigned long flags; + + /* Read irq status */ + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "IRQ=0x%x\n", status); + + if (status & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + if (status & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + wake_up(&priv->wq); + } + + /* all other interested interrupts handled in bottom half */ + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); + if (status) { + /* Save irq status for bottom-half processing. 
*/ + spin_lock_irqsave(&priv->lock, flags); + priv->irq0_stat |= status; + spin_unlock_irqrestore(&priv->lock, flags); + + schedule_work(&priv->bcmgenet_irq_work); + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) +{ + /* Acknowledge the interrupt */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + const unsigned char *addr) +{ + bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); + bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); +} + +static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + u32 addr_tmp; + + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); + put_unaligned_be32(addr_tmp, &addr[0]); + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1); + put_unaligned_be16(addr_tmp, &addr[4]); +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + unsigned int i; + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_netif_start(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ + bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); + + bcmgenet_enable_tx_napi(priv); + + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + + phy_start(dev->phydev); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + clk_prepare_enable(priv->clk); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is 
allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + init_umac(priv); + + /* Apply features again in case we changed them while interface was + * down + */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + /* HFB init */ + bcmgenet_hfb_init(priv); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } + + bcmgenet_netif_start(dev); + + if (!priv->ecdev) + netif_tx_start_all_queues(dev); + + return 0; + +err_irq1: + free_irq(priv->irq1, priv); +err_irq0: + free_irq(priv->irq0, priv); +err_fini_dma: + bcmgenet_dma_teardown(priv); + bcmgenet_fini_dma(priv); +err_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static void bcmgenet_netif_stop(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + bcmgenet_disable_tx_napi(priv); + netif_tx_disable(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + phy_stop(dev->phydev); + bcmgenet_disable_rx_napi(priv); + bcmgenet_intr_disable(priv); + + /* Wait for pending work items to complete. Since interrupts are + * disabled no new work will be scheduled. 
+ */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + bcmgenet_netif_stop(dev); + + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(dev->phydev); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock(&ring->lock); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock(&ring->lock); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? 
"enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + +static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + netif_trans_update(dev); + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MDF_FILTER 17 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + const unsigned char *addr, + int *i) +{ + bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], + UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + *i += 2; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, nfilter; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Number of filters needed */ + nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; + + /* + * Turn on promicuous mode for three scenarios + * 1. IFF_PROMISC flag is set + * 2. IFF_ALLMULTI flag is set + * 3. The number of filters needed exceeds the number filters + * supported by the hardware. + */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || + (nfilter > MAX_MDF_FILTER)) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* update MDF filter */ + i = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); + + /* Unicast */ + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Multicast */ + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Enable filters */ + reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); +} + +/* Set the hardware MAC address. */ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. 
+ */ + if (netif_running(dev)) + return -EBUSY; + +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(dev, addr->sa_data); +#else + ether_addr_copy(dev->dev_addr, addr->sa_data); +#endif + return 0; +} + +static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long tx_bytes = 0, tx_packets = 0; + unsigned long rx_bytes = 0, rx_packets = 0; + unsigned long rx_errors = 0, rx_dropped = 0; + struct bcmgenet_tx_ring *tx_ring; + struct bcmgenet_rx_ring *rx_ring; + unsigned int q; + + for (q = 0; q < priv->hw_params->tx_queues; q++) { + tx_ring = &priv->tx_rings[q]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + } + tx_ring = &priv->tx_rings[DESC_INDEX]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + + for (q = 0; q < priv->hw_params->rx_queues; q++) { + rx_ring = &priv->rx_rings[q]; + + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + } + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_packets = rx_packets; + dev->stats.rx_errors = rx_errors; + dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; + return &dev->stats; +} + +static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || + priv->phy_interface != PHY_INTERFACE_MODE_MOCA) + return -EOPNOTSUPP; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif + .ndo_get_stats = bcmgenet_get_stats, + .ndo_change_carrier = bcmgenet_change_carrier, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 
0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V4] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V5] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + u16 gphy_rev; + + if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 6) + major = 5; + else if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. + */ + gphy_rev = reg & 0xffff; + + if (GENET_IS_V5(priv)) { + /* The EPHY revision should come from the MDIO registers of + * the PHY not from GENET. 
+ */ + if (gphy_rev != 0) { + pr_warn("GENET is reporting EPHY revision: 0x%04x\n", + gphy_rev); + } + /* This is reserved so should require special treatment */ + } else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + /* This is the good old scheme, just GPHY major, no minor nor patch */ + } else if ((gphy_rev & 0xf0) != 0) { + priv->gphy_rev = gphy_rev << 8; + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + } else if ((gphy_rev & 0xff00) != 0) { + priv->gphy_rev = gphy_rev; + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +struct bcmgenet_plat_data { + enum bcmgenet_version version; + u32 dma_max_burst_length; +}; + +static const struct bcmgenet_plat_data v1_plat_data = { + .version = GENET_V1, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v2_plat_data = { + .version = GENET_V2, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v3_plat_data = { + .version = GENET_V3, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v4_plat_data = { + .version = GENET_V4, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v5_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data bcm2711_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = 0x08, +}; + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, + { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, + { .compatible = "brcm,genet-v3", .data = &v3_plat_data }, + { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, + { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, + { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcmgenet_match); + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; + const struct bcmgenet_plat_data *pdata; + struct bcmgenet_priv *priv; + struct net_device *dev; + unsigned int i; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + if (priv->irq0 < 0) { + err = priv->irq0; + goto err; + } + priv->irq1 = platform_get_irq(pdev, 1); + if (priv->irq1 < 0) { + err = priv->irq1; + goto err; + } + priv->wol_irq = platform_get_irq_optional(pdev, 2); + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if 
(IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + spin_lock_init(&priv->lock); + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + dev->watchdog_timeo = 2 * HZ; + dev->ethtool_ops = &bcmgenet_ethtool_ops; + dev->netdev_ops = &bcmgenet_netdev_ops; + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set default features */ + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = true; + err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, + dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + netdev_boot_setup_check(dev); + + priv->dev = dev; + priv->pdev = pdev; + + pdata = device_get_match_data(&pdev->dev); + if (pdata) { + priv->version = pdata->version; + priv->dma_max_burst_length = pdata->dma_max_burst_length; + } else { + priv->version = pd->genet_version; + priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; + } + + priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) { + dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); + err = PTR_ERR(priv->clk); + goto err; + } + + err = clk_prepare_enable(priv->clk); + if (err) + goto err; + + bcmgenet_set_hw_params(priv); + + err = -EIO; + if (priv->hw_params->flags & GENET_HAS_40BITS) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_clk_disable; + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); + err = PTR_ERR(priv->clk_wol); + goto err_clk_disable; + } + + priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); + err = PTR_ERR(priv->clk_eee); + goto err_clk_disable; + } + + /* If this is an internal GPHY, power it on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + if (pd && !IS_ERR_OR_NULL(pd->mac_address)) +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(dev, pd->mac_address); +#else + ether_addr_copy(dev->dev_addr, pd->mac_address); +#endif + else +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + if (!device_get_ethdev_address(&pdev->dev, dev)) + if (has_acpi_companion(&pdev->dev)) { + u8 addr[ETH_ALEN]; + + bcmgenet_get_hw_addr(priv, addr); + eth_hw_addr_set(dev, addr); + } +#else + if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN)) + if (has_acpi_companion(&pdev->dev)) + bcmgenet_get_hw_addr(priv, dev->dev_addr); +#endif + + if (!is_valid_ether_addr(dev->dev_addr)) { + dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + eth_hw_addr_random(dev); + } + + reset_umac(priv); + + err = bcmgenet_mii_init(dev); + if (err) + goto 
err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + /* Set default coalescing parameters */ + for (i = 0; i < priv->hw_params->rx_queues; i++) + priv->rx_rings[i].rx_max_coalesced_frames = 1; + priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; + + /* libphy will determine the link state */ + netif_carrier_off(dev); + + /* Turn off the main clock, WOL clock is handled separately */ + clk_disable_unprepare(priv->clk); + + priv->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + if (priv->ecdev) { + err = ecdev_open(priv->ecdev); + if (err) { + ecdev_withdraw(priv->ecdev); + priv->ecdev = NULL; + bcmgenet_mii_exit(dev); + goto err; + } + } else { + err = register_netdev(dev); + if (err) { + bcmgenet_mii_exit(dev); + goto err; + } + } + + return err; + +err_clk_disable: + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + if (priv->ecdev) { + ecdev_close(priv->ecdev); + ecdev_withdraw(priv->ecdev); + priv->ecdev = NULL; + } else + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + +static void bcmgenet_shutdown(struct platform_device *pdev) +{ + bcmgenet_remove(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int bcmgenet_resume_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + u32 reg; + + if (priv->ecdev) + ecdev_open(priv->ecdev); + else if (!netif_running(dev)) + return 0; + + /* Turn on the clock */ + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + if (device_may_wakeup(d) && priv->wolopts) { + /* Account for Wake-on-LAN events and clear those events + * (Some devices need more time between enabling the clocks + * and the interrupt register reflecting the wake event so + * read the register twice) + */ + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + if (reg & UMAC_IRQ_WAKE_EVENT) + pm_wakeup_event(&priv->pdev->dev, 0); + } + + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); + + return 0; +} + +static int bcmgenet_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + unsigned long dma_ctrl; + int ret; + + if (!netif_running(dev)) + return 0; + + /* From WOL-enabled suspend, switch to regular clock */ + if (device_may_wakeup(d) && priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + bcmgenet_umac_reset(priv); + + init_umac(priv); + + phy_init_hw(dev->phydev); + + /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); + bcmgenet_mii_config(priv->dev, false); + + /* Restore enabled features */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, 
&priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto out_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + if (!device_may_wakeup(d)) + phy_resume(dev->phydev); + + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); + + bcmgenet_netif_start(dev); + + if (priv->ecdev) + ecdev_open(priv->ecdev); + else + netif_device_attach(dev); + + return 0; + +out_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (priv->ecdev) { + ecdev_close(priv->ecdev); + } else { + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + } + + bcmgenet_netif_stop(dev); + + if (!device_may_wakeup(d)) + phy_suspend(dev->phydev); + + /* Disable filtering */ + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); + + return 0; +} + +static int bcmgenet_suspend_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + if (priv->ecdev) { + ecdev_close(priv->ecdev); + } else if (!netif_running(dev)) + return 0; + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + else if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + /* Let the framework handle resumption and leave the clocks on */ + if (ret) + return ret; + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + return 0; +} +#else +#define bcmgenet_suspend NULL +#define bcmgenet_suspend_noirq NULL +#define bcmgenet_resume NULL +#define bcmgenet_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops bcmgenet_pm_ops = { + .suspend = bcmgenet_suspend, + .suspend_noirq = bcmgenet_suspend_noirq, + .resume = bcmgenet_resume, + .resume_noirq = bcmgenet_resume_noirq, +}; + +static const struct acpi_device_id genet_acpi_match[] = { + { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, genet_acpi_match); + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .shutdown = bcmgenet_shutdown, + .driver = { + .name = "ec_bcmgenet", + .of_match_table = bcmgenet_match, + .pm = &bcmgenet_pm_ops, + .acpi_match_table = genet_acpi_match, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver " + "(EtherCAT enabled, Version " REV ")"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: mdio-bcm-unimac"); diff --git a/devices/genet/bcmgenet-5.14-ethercat.h b/devices/genet/bcmgenet-5.14-ethercat.h new file mode 100644 index 00000000..c8716b23 --- /dev/null +++ b/devices/genet/bcmgenet-5.14-ethercat.h @@ -0,0 +1,709 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2020 Broadcom + */ + +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include +#include +#include +#include 
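For orientation, the EtherCAT hook used throughout this driver (the priv->ecdev checks, ecdev_offer()/ecdev_open() in bcmgenet_probe() and ecdev_close() in the remove/suspend paths above) boils down to the condensed sketch below. example_register() is a hypothetical name and the error handling is simplified compared with the real probe function:

/* Hypothetical helper, condensed from bcmgenet_probe() above. */
static int example_register(struct bcmgenet_priv *priv, struct net_device *dev)
{
	priv->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE);
	if (priv->ecdev) {
		/* Claimed by an EtherCAT master: the device is serviced via
		 * ec_poll(), so NAPI and the normal netdev registration are
		 * skipped (hence the priv->ecdev guards in the NAPI helpers).
		 */
		if (ecdev_open(priv->ecdev)) {
			ecdev_withdraw(priv->ecdev);
			priv->ecdev = NULL;
			return -ENODEV; /* simplified; the real code propagates err */
		}
		return 0;
	}

	/* Not claimed: behave like the stock driver. */
	return register_netdev(dev);
}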
+#include +#include +#include +#include +#include + +#include "unimac-5.14-ethercat.h" + +/* EtherCAT header file */ +#include "../ecdev.h" + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. configuration */ +#define MAX_NUM_OF_FS_RULES 16 +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. */ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0X7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct 
bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; +}; + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT_V1 0x61C +#define RBUF_OVFL_CNT_V2 0x80 +#define RBUF_OVFL_CNT_V3PLUS 0x94 + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT_V1 0x634 +#define RBUF_ERR_CNT_V2 0x84 +#define RBUF_ERR_CNT_V3PLUS 0x98 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) +#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) +#define RBUF_SKIP_FCS (1 << 4) +#define RBUF_L3_PARSE_DIS (1 << 5) + +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) +#define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define 
INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) +#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \ + UMAC_IRQ_MPD_R) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE + +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 << 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) +#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_IDDQ_GLBL_PWR (1 << 7) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) +#define EXT_PWR_DOWN_PHY_TX (1 << 16) +#define EXT_PWR_DOWN_PHY_RX (1 << 17) +#define EXT_PWR_DOWN_PHY_SD (1 << 18) +#define EXT_PWR_DOWN_PHY_RD (1 << 19) +#define EXT_PWR_DOWN_PHY_EN (1 << 20) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define 
DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x01FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ +#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, + GENET_POWER_WOL_MAGIC, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4, + GENET_V5 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) +#define GENET_IS_V5(p) ((p)->version == GENET_V5) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR (1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) + +/* BCMGENET hardware parameters, keep this 
structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 tx_bds_per_q; + u8 rx_queues; + u8 rx_bds_per_q; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 hfb_filter_size; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_skb_cb { + struct enet_cb *first_cb; /* First control block of SKB */ + struct enet_cb *last_cb; /* Last control block of SKB */ + unsigned int bytes_sent; /* bytes on the wire (no TSB) */ +}; + +#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb)) + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ + unsigned long packets; + unsigned long bytes; + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct dim dim; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned long bytes; + unsigned long packets; + unsigned long errors; + unsigned long dropped; + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + struct bcmgenet_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + void (*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; +}; + +enum bcmgenet_rxnfc_state { + BCMGENET_RXNFC_STATE_UNUSED = 0, + BCMGENET_RXNFC_STATE_DISABLED, + BCMGENET_RXNFC_STATE_ENABLED +}; + +struct bcmgenet_rxnfc_rule { + struct list_head list; + struct ethtool_rx_flow_spec fs; + enum bcmgenet_rxnfc_state state; +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + + /* transmit variables */ + void __iomem *tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES]; + struct list_head rxnfc_list; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + + /* MDIO bus variables */ + wait_queue_head_t wq; + bool internal_phy; + struct device_node *phy_dn; + struct device_node *mdio_dn; + struct mii_bus *mii_bus; + u16 gphy_rev; + 
struct clk *clk_eee; + bool clk_eee_enabled; + + /* PHY device variables */ + int old_link; + int old_speed; + int old_duplex; + int old_pause; + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + int wol_irq; + bool wol_irq_disabled; + + /* shared status */ + spinlock_t lock; + unsigned int irq0_stat; + + /* HW descriptors/checksum variables */ + bool crc_fwd_en; + + u32 dma_max_burst_length; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + struct platform_device *mii_pdev; + + /* WOL */ + struct clk *clk_wol; + u32 wolopts; + u8 sopass[SOPASS_MAX]; + bool wol_active; + + struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; + /* EtherCAT device variables */ + ec_device_t *ecdev; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + /* MIPS chips strapped for BE will automagically configure the \ + * peripheral registers for CPU-native byte order. \ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_probe(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); +void bcmgenet_mii_setup(struct net_device *dev); + +/* Wake-on-LAN routines */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + +#endif /* __BCMGENET_H__ */ diff --git a/devices/genet/bcmgenet-5.14-orig.c b/devices/genet/bcmgenet-5.14-orig.c new file mode 100644 index 00000000..db742419 --- /dev/null +++ b/devices/genet/bcmgenet-5.14-orig.c @@ -0,0 +1,4292 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcmgenet.h" + 
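The GENET_IO_MACRO above stamps out a matched <name>_readl()/<name>_writel() pair per register block, folding the block's base offset into every access so callers only deal with block-relative register offsets. A minimal userspace sketch of the same pattern, with a plain array standing in for the ioremap()ed window and hypothetical block names and offsets:

#include <stdint.h>
#include <stdio.h>

/* Fake register window; in the driver this is priv->base from ioremap(). */
static uint32_t regs[1024];
static uint8_t *iobase = (uint8_t *)regs;

/* Same shape as GENET_IO_MACRO: one macro invocation per register block. */
#define IO_MACRO(name, block_off)                                        \
static uint32_t name##_readl(uint32_t off)                               \
{                                                                        \
        return *(volatile uint32_t *)(iobase + (block_off) + off);       \
}                                                                        \
static void name##_writel(uint32_t val, uint32_t off)                    \
{                                                                        \
        *(volatile uint32_t *)(iobase + (block_off) + off) = val;        \
}

IO_MACRO(sys, 0x0000)   /* cf. GENET_SYS_OFF  */
IO_MACRO(umac, 0x0800)  /* cf. GENET_UMAC_OFF */

int main(void)
{
        umac_writel(0x12345678, 0x08);
        printf("UMAC +0x08 = 0x%08x\n", (unsigned)umac_readl(0x08));
        printf("SYS  +0x00 = 0x%08x\n", (unsigned)sys_readl(0x00));
        return 0;
}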
+/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. + */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel_relaxed(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl_relaxed(offset); +} + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. 
+ */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. + */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, + DMA_RING0_TIMEOUT, + DMA_RING1_TIMEOUT, + DMA_RING2_TIMEOUT, + DMA_RING3_TIMEOUT, + DMA_RING4_TIMEOUT, + DMA_RING5_TIMEOUT, + DMA_RING6_TIMEOUT, + DMA_RING7_TIMEOUT, + DMA_RING8_TIMEOUT, + DMA_RING9_TIMEOUT, + DMA_RING10_TIMEOUT, + DMA_RING11_TIMEOUT, + DMA_RING12_TIMEOUT, + DMA_RING13_TIMEOUT, + DMA_RING14_TIMEOUT, + DMA_RING15_TIMEOUT, + DMA_RING16_TIMEOUT, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x2C, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + 
[DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + [DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { + TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. 
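The bcmgenet_dma_regs_v1/v2/v3plus tables above, and the genet_dma_ring_regs tables that follow, map one logical register enum onto per-revision offsets; a single pointer is set once at probe time so the hot-path accessors need no version checks. A small standalone sketch of that indirection (register names and offset values are made up):

#include <stdint.h>
#include <stdio.h>

/* Logical registers, stable across silicon revisions. */
enum dma_reg { DMA_RING_CFG, DMA_CTRL, DMA_STATUS, DMA_REG_MAX };

/* Per-revision physical offsets (hypothetical values). */
static const uint8_t dma_regs_v1[DMA_REG_MAX]     = { 0x10, 0x00, 0x04 };
static const uint8_t dma_regs_v3plus[DMA_REG_MAX] = { 0x00, 0x04, 0x08 };

/* Chosen once the hardware revision is known, cf. bcmgenet_dma_regs. */
static const uint8_t *dma_regs;

static uint32_t fake_mmio[64];

static void dma_writel(uint32_t val, enum dma_reg r)
{
        fake_mmio[dma_regs[r] / 4] = val;
}

static uint32_t dma_readl(enum dma_reg r)
{
        return fake_mmio[dma_regs[r] / 4];
}

int main(void)
{
        int version = 3;        /* pretend probe detected a v3+ core */

        dma_regs = (version >= 3) ? dma_regs_v3plus : dma_regs_v1;
        dma_writel(0x1, DMA_CTRL);
        printf("DMA_CTRL at offset 0x%02x reads 0x%x\n",
               (unsigned)dma_regs[DMA_CTRL], (unsigned)dma_readl(DMA_CTRL));
        return 0;
}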
+ */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +} + +static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset, reg, reg1; + + offset = HFB_FLT_ENABLE_V3PLUS; + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + if (f_index < 32) { + reg1 &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); + } else { + reg &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + } + if (!reg && !reg1) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= 
((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_validate_mask(void *mask, size_t size) +{ + while (size) { + switch (*(unsigned char *)mask++) { + case 0x00: + case 0x0f: + case 0xf0: + case 0xff: + size--; + continue; + default: + return -EINVAL; + } + } + + return 0; +} + +#define VALIDATE_MASK(x) \ + bcmgenet_hfb_validate_mask(&(x), sizeof(x)) + +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) +{ + u32 index, tmp; + + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); + + while (size--) { + if (offset++ & 1) { + tmp &= ~0x300FF; + tmp |= (*(unsigned char *)val++); + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0x30000; + break; + case 0xF0: + tmp |= 0x20000; + break; + case 0x0F: + tmp |= 0x10000; + break; + } + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); + if (size) + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); + } else { + tmp &= ~0xCFF00; + tmp |= (*(unsigned char *)val++) << 8; + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0xC0000; + break; + case 0xF0: + tmp |= 0x80000; + break; + case 0x0F: + tmp |= 0x40000; + break; + } + if (!size) + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); + } + } + + return 0; +} + +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) +{ + struct ethtool_rx_flow_spec *fs = &rule->fs; + u32 offset = 0, f_length = 0, f; + u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; + size_t size; + + f = fs->location; + if (fs->flow_type & FLOW_MAC_EXT) { + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_ext.h_dest, &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if (fs->flow_type & FLOW_EXT) { + if (fs->m_ext.vlan_etype || + fs->m_ext.vlan_tci) { + bcmgenet_hfb_insert_data(priv, f, 12, + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmgenet_hfb_insert_data(priv, f, 14, + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + f_length += DIV_ROUND_UP(VLAN_HLEN, 2); + } + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN, 2); + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + break; + case IP_USER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2); + /* Specify IP Ether Type */ + val_16 = htons(ETH_P_IP); + mask_16 = 0xFFFF; + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmgenet_hfb_insert_data(priv, f, 15 + offset, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmgenet_hfb_insert_data(priv, f, 23 + offset, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmgenet_hfb_insert_data(priv, f, 26 + offset, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmgenet_hfb_insert_data(priv, 
f, 30 + offset, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if (!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, + &val_8, &mask_8, + sizeof(val_8)); + size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); + bcmgenet_hfb_insert_data(priv, f, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + size); + f_length += DIV_ROUND_UP(size, 2); + break; + } + + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); + if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { + /* Ring 0 flows can be handled by the default Descriptor Ring + * We'll map them to ring 0, but don't enable the filter + */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); + rule->state = BCMGENET_RXNFC_STATE_DISABLED; + } else { + /* Other Rx rings are direct mapped here */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); + rule->state = BCMGENET_RXNFC_STATE_ENABLED; + } +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. + */ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + int i; + + INIT_LIST_HEAD(&priv->rxnfc_list); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); + priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; + } + + bcmgenet_hfb_clear(priv); +} + +static int bcmgenet_begin(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn on the clock */ + return clk_prepare_enable(priv->clk); +} + +static void bcmgenet_complete(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn off the clock */ + clk_disable_unprepare(priv->clk); +} + +static int bcmgenet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + phy_ethtool_ksettings_get(dev->phydev, cmd); + + return 0; +} + +static int bcmgenet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(dev->phydev, cmd); +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + int 
ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +static int bcmgenet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; + + ec->tx_max_coalesced_frames = + bcmgenet_tdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_max_coalesced_frames = + bcmgenet_rdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_coalesce_usecs = + bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + + return 0; +} + +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +static int bcmgenet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int i; + + /* Base system clock is 125Mhz, DMA timeout is this reference clock + * divided by 1024, which yields roughly 8.192us, our maximum value + * has to fit in the DMA_TIMEOUT_MASK (16 bits) + */ + if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->tx_max_coalesced_frames == 0 || + ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) + return -EINVAL; + + /* GENET TDMA hardware does not support a configurable timeout, but will + * always generate an interrupt either after MBDONE packets have been + * transmitted, or when the ring is empty. 
+ */ + + /* Program all TX queues with the same values, as there is no + * ethtool knob to do coalescing on a per-queue basis + */ + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tdma_ring_writel(priv, i, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + bcmgenet_tdma_ring_writel(priv, DESC_INDEX, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); + + return 0; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + +#define STAT_GENET_Q(num) \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ + tx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ + tx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ + rx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ + rx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ + rx_rings[num].errors), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \ + rx_rings[num].dropped) + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + 
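Each STAT_NETDEV()/STAT_GENET_MIB() entry being populated here records a counter as {name, sizeof, offsetof, type}, which lets bcmgenet_get_ethtool_stats() further down copy every counter with one generic loop instead of naming each field. A reduced standalone sketch of that table-driven readout (the struct and counter names are invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's counter holders (net_device_stats / bcmgenet_priv). */
struct demo_stats {
        unsigned long rx_packets;
        unsigned long tx_packets;
        uint32_t rx_crc_errors;
};

struct stat_desc {
        const char *name;
        size_t size;
        size_t offset;
};

#define STAT(m) { #m, sizeof(((struct demo_stats *)0)->m), \
                  offsetof(struct demo_stats, m) }

static const struct stat_desc stats_table[] = {
        STAT(rx_packets),
        STAT(tx_packets),
        STAT(rx_crc_errors),
};

int main(void)
{
        struct demo_stats s = { .rx_packets = 42, .tx_packets = 7, .rx_crc_errors = 1 };
        size_t i;

        for (i = 0; i < sizeof(stats_table) / sizeof(stats_table[0]); i++) {
                const struct stat_desc *d = &stats_table[i];
                const char *p = (const char *)&s + d->offset;
                unsigned long long val;

                /* Widen according to the recorded size, like the driver does
                 * for unsigned long vs. u32 sized counters. */
                if (d->size == sizeof(unsigned long))
                        val = *(const unsigned long *)p;
                else
                        val = *(const uint32_t *)p;

                printf("%-16s %llu\n", d->name, val);
        }
        return 0;
}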
STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT_V1), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, + UMAC_RBUF_ERR_CNT_V1), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("tx_realloc_tsb", 
mib.tx_realloc_tsb), + STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), + /* Per TX queues */ + STAT_GENET_Q(0), + STAT_GENET_Q(1), + STAT_GENET_Q(2), + STAT_GENET_Q(3), + STAT_GENET_Q(16), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset) +{ + u16 new_offset; + u32 val; + + switch (offset) { + case UMAC_RBUF_OVFL_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_OVFL_CNT_V2; + else + new_offset = RBUF_OVFL_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + case UMAC_RBUF_ERR_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_ERR_CNT_V2; + else + new_offset = RBUF_ERR_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + default: + val = bcmgenet_umac_readl(priv, offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, offset); + break; + } + + return val; +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: + continue; + case BCMGENET_STAT_RUNT: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_TX: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_RX: + val = bcmgenet_umac_readl(priv, + UMAC_MIB_START + j + offset); + offset = 0; /* Reset Offset */ + break; + case BCMGENET_STAT_MISC: + if (GENET_IS_V1(priv)) { + val = bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, + s->reg_offset); + } else { + val = bcmgenet_update_stat_misc(priv, + s->reg_offset); + } + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + dev->netdev_ops->ndo_get_stats(dev); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } +} + +static void bcmgenet_eee_enable_set(struct net_device 
*dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = bcmgenet_readl(priv->base + off); + if (enable) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + bcmgenet_writel(reg, priv->base + off); + + /* Do the same for thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + int ret = 0; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false); + } else { + ret = phy_init_eee(dev->phydev, 0); + if (ret) { + netif_err(priv, hw, dev, "EEE initialization failed\n"); + return ret; + } + + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, true); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static int bcmgenet_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) { + netdev_err(dev, "rxnfc: Invalid location (%d)\n", + cmd->fs.location); + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case IP_USER_FLOW: + l4_mask = &cmd->fs.m_u.usr_ip4_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(l4_mask->ip4src) || + VALIDATE_MASK(l4_mask->ip4dst) || + VALIDATE_MASK(l4_mask->l4_4_bytes) || + VALIDATE_MASK(l4_mask->proto) || + VALIDATE_MASK(l4_mask->ip_ver) || + VALIDATE_MASK(l4_mask->tos)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(eth_mask->h_dest) || + VALIDATE_MASK(eth_mask->h_source) || + VALIDATE_MASK(eth_mask->h_proto)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + default: + netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n", + cmd->fs.flow_type); + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || + 
VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { + netdev_err(dev, "rxnfc: user-def not supported\n"); + return -EINVAL; + } + } + + if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + } + + return 0; +} + +static int bcmgenet_insert_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *loc_rule; + int err; + + if (priv->hw_params->hfb_filter_size < 128) { + netdev_err(dev, "rxnfc: Not supported by this device\n"); + return -EINVAL; + } + + if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && + cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { + netdev_err(dev, "rxnfc: Unsupported action (%llu)\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + + err = bcmgenet_validate_flow(dev, cmd); + if (err) + return err; + + loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memcpy(&loc_rule->fs, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); + + list_add_tail(&loc_rule->list, &priv->rxnfc_list); + + return 0; +} + +static int bcmgenet_delete_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[cmd->fs.location]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { + err = -ENOENT; + goto out; + } + + if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); + +out: + return err; +} + +static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = bcmgenet_insert_flow(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = bcmgenet_delete_flow(dev, cmd); + break; + default: + netdev_warn(priv->dev, "Unsupported ethtool command. 
(%d)\n", + cmd->cmd); + return -EINVAL; + } + + return err; +} + +static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[loc]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) + err = -ENOENT; + else + memcpy(&cmd->fs, &rule->fs, + sizeof(struct ethtool_rx_flow_spec)); + + return err; +} + +static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) +{ + struct list_head *pos; + int res = 0; + + list_for_each(pos, &priv->rxnfc_list) + res++; + + return res; +} + +static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + int i = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->hw_params->rx_queues ?: 1; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmgenet_get_num_flows(priv); + cmd->data = MAX_NUM_OF_FS_RULES; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (i < cmd->rule_cnt) + rule_locs[i++] = rule->fs.location; + cmd->rule_cnt = i; + cmd->data = MAX_NUM_OF_FS_RULES; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/* standard ethtool support functions. */ +static const struct ethtool_ops bcmgenet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .begin = bcmgenet_begin, + .complete = bcmgenet_complete, + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, + .get_wol = bcmgenet_get_wol, + .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = phy_ethtool_nway_reset, + .get_coalesce = bcmgenet_get_coalesce, + .set_coalesce = bcmgenet_set_coalesce, + .get_link_ksettings = bcmgenet_get_link_ksettings, + .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, + .get_rxnfc = bcmgenet_get_rxnfc, + .set_rxnfc = bcmgenet_set_rxnfc, +}; + +/* Power down the unimac, based on mode. 
*/ +static int bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->dev->phydev); + break; + + case GENET_POWER_WOL_MAGIC: + ret = bcmgenet_wol_power_down_cfg(priv, mode); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + if (GENET_IS_V5(priv)) + reg |= EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR; + else + reg |= EXT_PWR_DOWN_PHY; + + reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); + } + break; + default: + break; + } + + return ret; +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); + if (GENET_IS_V5(priv)) { + reg &= ~(EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR); + reg |= EXT_PHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + mdelay(1); + + reg &= ~EXT_PHY_RESET; + } else { + reg &= ~EXT_PWR_DOWN_PHY; + reg |= EXT_PWR_DN_EN_LD; + } + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_phy_power_set(priv->dev, true); + break; + + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + if (!GENET_IS_V5(priv)) { + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + break; + case GENET_POWER_WOL_MAGIC: + bcmgenet_wol_power_up_cfg(priv, mode); + return; + default: + break; + } +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Rewinding local write pointer */ + if (ring->write_ptr == ring->cb_ptr) + ring->write_ptr = ring->end_ptr; + else + ring->write_ptr--; + + return tx_cb_ptr; +} + +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring 
*ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_SET); +} + +/* Simple helper to free a transmit control block's resources + * Returns an skb when the last transmit control block associated with the + * skb is freed. The skb should be freed by the caller if necessary. + */ +static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + + if (skb) { + cb->skb = NULL; + if (cb == GENET_CB(skb)->first_cb) + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + + if (cb == GENET_CB(skb)->last_cb) + return skb; + + } else if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_page(dev, + dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return NULL; +} + +/* Simple helper to free a receive control block's resources */ +static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + cb->skb = NULL; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return skb; +} + +/* Unlocked version of the reclaim routine */ +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int txbds_processed = 0; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + unsigned int txbds_ready; + unsigned int c_index; + struct sk_buff *skb; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_CLEAR); + else + bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + INTRL2_CPU_CLEAR); + + /* Compute how many buffers are transmitted since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) + & DMA_C_INDEX_MASK; + txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); + + /* Reclaim transmitted buffers */ + while (txbds_processed < txbds_ready) { + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, + &priv->tx_cbs[ring->clean_ptr]); + if (skb) { + pkts_compl++; + bytes_compl += GENET_CB(skb)->bytes_sent; + dev_consume_skb_any(skb); + } + + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; + } + + ring->free_bds += txbds_processed; + ring->c_index = c_index; + + ring->packets += pkts_compl; + ring->bytes += bytes_compl; + + 
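The txbds_ready computation above, (c_index - ring->c_index) & DMA_C_INDEX_MASK, works because the hardware consumer index is a free-running 16-bit counter: as long as fewer than 65536 descriptors complete between polls, the masked subtraction stays correct even across wraparound. A tiny demonstration of that modular arithmetic:

#include <stdint.h>
#include <stdio.h>

#define DMA_C_INDEX_MASK 0xFFFF

/* Descriptors completed since the last poll, assuming the hardware index
 * advances by fewer than 65536 entries in between. */
static unsigned int txbds_ready(uint16_t hw_c_index, uint16_t last_c_index)
{
        return (hw_c_index - last_c_index) & DMA_C_INDEX_MASK;
}

int main(void)
{
        /* Plain case: 0x0010 -> 0x0018 means 8 descriptors reclaimed. */
        printf("%u\n", txbds_ready(0x0018, 0x0010));
        /* Wrapped case: 0xFFFB -> 0x0003 is still 8 thanks to the mask. */
        printf("%u\n", txbds_ready(0x0003, 0xFFFB));
        return 0;
}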
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + pkts_compl, bytes_compl); + + return txbds_processed; +} + +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned int released; + + spin_lock_bh(&ring->lock); + released = __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_bh(&ring->lock); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + struct netdev_queue *txq; + + spin_lock(&ring->lock); + work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + netif_tx_wake_queue(txq); + } + spin_unlock(&ring->lock); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 ip_proto; + __be16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. + */ + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + if (!new_skb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; + dev->stats.tx_dropped++; + return NULL; + } + dev_consume_skb_any(skb); + skb = new_skb; + priv->mib.tx_realloc_tsb++; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = skb->protocol; + switch (ip_ver) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* don't use UDP flag */ + ip_proto = 0; + break; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset) | + STATUS_TX_CSUM_LV; + + /* Set the special UDP flag for UDP */ + if (ip_proto == IPPROTO_UDP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + + status->tx_csum_info = tx_csum_info; + } + + return skb; +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_tx_ring *ring = NULL; + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + int nr_frags, index; + dma_addr_t mapping; + unsigned int size; + skb_frag_t *frag; + u32 len_stat; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. (highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. 
+ * queue_mapping = 4, goes to ring 3. + */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + nr_frags = skb_shinfo(skb)->nr_frags; + + spin_lock(&ring->lock); + if (ring->free_bds <= (nr_frags + 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Retain how many bytes will be sent on the wire, without TSB inserted + * by transmit checksum offload + */ + GENET_CB(skb)->bytes_sent = skb->len; + + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + + for (i = 0; i <= nr_frags; i++) { + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + BUG_ON(!tx_cb_ptr); + + if (!i) { + /* Transmit single SKB or head of fragment list */ + GENET_CB(skb)->first_cb = tx_cb_ptr; + size = skb_headlen(skb); + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + /* xmit fragment */ + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + ret = NETDEV_TX_OK; + goto out_unmap_frags; + } + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, size); + + tx_cb_ptr->skb = skb; + + len_stat = (size << DMA_BUFLENGTH_SHIFT) | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ + if (!i) { + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; + } + if (i == nr_frags) + len_stat |= DMA_EOP; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); + } + + GENET_CB(skb)->last_cb = tx_cb_ptr; + skb_tx_timestamp(skb); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) + netif_tx_stop_queue(txq); + + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); +out: + spin_unlock(&ring->lock); + + return ret; + +out_unmap_frags: + /* Back up for failed control block mapping */ + bcmgenet_put_txcb(priv, ring); + + /* Unmap successfully mapped control blocks */ + while (i-- > 0) { + tx_cb_ptr = bcmgenet_put_txcb(priv, ring); + bcmgenet_free_tx_cb(kdev, tx_cb_ptr); + } + + dev_kfree_skb(skb); + goto out; +} + +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + struct sk_buff *rx_skb; + dma_addr_t mapping; + + /* Allocate a new Rx skb */ + skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } + + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, 
priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + dev_kfree_skb_any(skb); + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; + } + + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = bcmgenet_free_rx_cb(kdev, cb); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + + /* Return the current Rx skb to caller */ + return rx_skb; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, + unsigned int budget) +{ + struct bcmgenet_priv *priv = ring->priv; + struct net_device *dev = priv->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 0; + unsigned int p_index, mask; + unsigned int discards; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) { + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_CLEAR); + } else { + mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); + bcmgenet_intrl2_1_writel(priv, + mask, + INTRL2_CPU_CLEAR); + } + + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + ring->errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + + p_index &= DMA_P_INDEX_MASK; + rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + + cb = &priv->rx_cbs[ring->read_ptr]; + skb = bcmgenet_rx_refill(priv, cb); + + if (unlikely(!skb)) { + ring->dropped++; + goto next; + } + + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { + rx_csum = (__force __be16)(status->rx_csum & 0xffff); + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, p_index, ring->c_index, + ring->read_ptr, dma_length_status); + + if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { + netif_err(priv, rx_status, dev, + "dropping fragmented packet!\n"); + ring->errors++; + dev_kfree_skb_any(skb); + goto next; + } + + /* report errors */ + if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER))) { + netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", + (unsigned int)dma_flag); + if (dma_flag & DMA_RX_CRC_ERROR) + 
dev->stats.rx_crc_errors++; + if (dma_flag & DMA_RX_OV) + dev->stats.rx_over_errors++; + if (dma_flag & DMA_RX_NO) + dev->stats.rx_frame_errors++; + if (dma_flag & DMA_RX_LG) + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } /* error packet */ + + skb_put(skb, len); + + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + len -= 66; + + if (priv->crc_fwd_en) { + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + bytes_processed += len; + + /*Finish setting up the received SKB and send it to the kernel*/ + skb->protocol = eth_type_trans(skb, priv->dev); + ring->packets++; + ring->bytes += len; + if (dma_flag & DMA_RX_MULT) + dev->stats.multicast++; + + /* Notify kernel */ + napi_gro_receive(&ring->napi, skb); + netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +next: + rxpktprocessed++; + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); + } + + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + + return rxpktprocessed; +} + +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + struct dim_sample dim_sample = {}; + unsigned int work_done; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete_done(napi, work_done); + ring->int_enable(ring); + } + + if (ring->dim.use_dim) { + dim_update_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + + return work_done; +} + +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct dim_cq_moder cur_profile = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = DIM_START_MEASURE; +} + +/* Assign skb to RX DMA descriptor. 
*/ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) +{ + struct enet_cb *cb; + struct sk_buff *skb; + int i; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_consume_skb_any(skb); + if (!cb->skb) + return -ENOMEM; + } + + return 0; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct sk_buff *skb; + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); + if (skb) + dev_consume_skb_any(skb); + } +} + +static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) +{ + u32 reg; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; + if (enable) + reg |= mask; + else + reg &= ~mask; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-size packet + * to be processed + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void reset_umac(struct bcmgenet_priv *priv) +{ + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + udelay(2); +} + +static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) +{ + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); +} + +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + +static void init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + u32 reg; + u32 int0_enable = 0; + + dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); + + reset_umac(priv); + + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, + UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx registers, enable ip header optimization and RSB */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if 
(priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + bcmgenet_intr_disable(priv); + + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) + int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + + dev_dbg(kdev, "done init umac\n"); +} + +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_rx_moderation(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +/* Initialize a Tx ring along with corresponding hardware registers */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + + spin_lock_init(&ring->lock); + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + start_ptr; + ring->size = size; + ring->clean_ptr = start_ptr; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_WRITE_PTR); + 
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + /* Initialize Tx NAPI */ + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) + return ret; + + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + + /* Initialize Rx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, + NAPI_POLL_WEIGHT); + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << + DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + return ret; +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx queues + * + * Queues 0-3 are priority-based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 
+ * + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] + */ +static void bcmgenet_init_tx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; + u32 dma_priority[3] = {0, 0, 0}; + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + /* Initialize Tx priority queues */ + for (i = 0; i < priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + } + + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. 
+ */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Enable rings */ + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + u32 dma_ctrl; + int i; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + return ret; +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + struct netdev_queue *txq; + int i; + + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i)); + + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + + 
bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } + + /* Initialize common TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->tx_cbs) { + kfree(priv->rx_cbs); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } + + /* Init rDma */ + bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } + + /* Init tDma */ + bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); + + return 0; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ + unsigned int status; + struct bcmgenet_priv *priv = container_of( + work, struct bcmgenet_priv, bcmgenet_irq_work); + + netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + + spin_lock_irq(&priv->lock); + status = priv->irq0_stat; + priv->irq0_stat = 0; + spin_unlock_irq(&priv->lock); + + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + + /* Link UP/DOWN event */ + if (status & UMAC_IRQ_LINK_EVENT) + phy_mac_interrupt(priv->dev->phydev); + +} + +/* bcmgenet_isr1: handle Rx and Tx priority queues */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int index, status; + + /* Read irq status */ + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "%s: IRQ=0x%x\n", __func__, status); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < priv->hw_params->rx_queues; index++) { + if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(status & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); 
+ } + } + + return IRQ_HANDLED; +} + +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int status; + unsigned long flags; + + /* Read irq status */ + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "IRQ=0x%x\n", status); + + if (status & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + if (status & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + wake_up(&priv->wq); + } + + /* all other interested interrupts handled in bottom half */ + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); + if (status) { + /* Save irq status for bottom-half processing. */ + spin_lock_irqsave(&priv->lock, flags); + priv->irq0_stat |= status; + spin_unlock_irqrestore(&priv->lock, flags); + + schedule_work(&priv->bcmgenet_irq_work); + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) +{ + /* Acknowledge the interrupt */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); + bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); +} + +static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + u32 addr_tmp; + + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); + put_unaligned_be32(addr_tmp, &addr[0]); + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1); + put_unaligned_be16(addr_tmp, &addr[4]); +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + unsigned int i; + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + 
dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_netif_start(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ + bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); + + bcmgenet_enable_tx_napi(priv); + + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + + phy_start(dev->phydev); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + clk_prepare_enable(priv->clk); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + init_umac(priv); + + /* Apply features again in case we changed them while interface was + * down + */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + /* HFB init */ + bcmgenet_hfb_init(priv); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } + + bcmgenet_netif_start(dev); + + netif_tx_start_all_queues(dev); + + return 0; + +err_irq1: + free_irq(priv->irq1, priv); +err_irq0: + free_irq(priv->irq0, priv); +err_fini_dma: + bcmgenet_dma_teardown(priv); + bcmgenet_fini_dma(priv); +err_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static void bcmgenet_netif_stop(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + bcmgenet_disable_tx_napi(priv); + netif_tx_disable(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + phy_stop(dev->phydev); + bcmgenet_disable_rx_napi(priv); + bcmgenet_intr_disable(priv); + + /* Wait for pending work items to complete. 
Since interrupts are + * disabled no new work will be scheduled. + */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + bcmgenet_netif_stop(dev); + + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(dev->phydev); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock(&ring->lock); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock(&ring->lock); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? 
"enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + +static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + netif_trans_update(dev); + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MDF_FILTER 17 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + unsigned char *addr, + int *i) +{ + bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], + UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + *i += 2; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, nfilter; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Number of filters needed */ + nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; + + /* + * Turn on promicuous mode for three scenarios + * 1. IFF_PROMISC flag is set + * 2. IFF_ALLMULTI flag is set + * 3. The number of filters needed exceeds the number filters + * supported by the hardware. + */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || + (nfilter > MAX_MDF_FILTER)) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* update MDF filter */ + i = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); + + /* Unicast */ + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Multicast */ + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Enable filters */ + reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); +} + +/* Set the hardware MAC address. */ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. 
+ */ + if (netif_running(dev)) + return -EBUSY; + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + +static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long tx_bytes = 0, tx_packets = 0; + unsigned long rx_bytes = 0, rx_packets = 0; + unsigned long rx_errors = 0, rx_dropped = 0; + struct bcmgenet_tx_ring *tx_ring; + struct bcmgenet_rx_ring *rx_ring; + unsigned int q; + + for (q = 0; q < priv->hw_params->tx_queues; q++) { + tx_ring = &priv->tx_rings[q]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + } + tx_ring = &priv->tx_rings[DESC_INDEX]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + + for (q = 0; q < priv->hw_params->rx_queues; q++) { + rx_ring = &priv->rx_rings[q]; + + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + } + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_packets = rx_packets; + dev->stats.rx_errors = rx_errors; + dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; + return &dev->stats; +} + +static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || + priv->phy_interface != PHY_INTERFACE_MODE_MOCA) + return -EOPNOTSUPP; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif + .ndo_get_stats = bcmgenet_get_stats, + .ndo_change_carrier = bcmgenet_change_carrier, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + 
GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V4] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V5] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + u16 gphy_rev; + + if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 6) + major = 5; + else if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. + */ + gphy_rev = reg & 0xffff; + + if (GENET_IS_V5(priv)) { + /* The EPHY revision should come from the MDIO registers of + * the PHY not from GENET. 
+ */ + if (gphy_rev != 0) { + pr_warn("GENET is reporting EPHY revision: 0x%04x\n", + gphy_rev); + } + /* This is reserved so should require special treatment */ + } else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + /* This is the good old scheme, just GPHY major, no minor nor patch */ + } else if ((gphy_rev & 0xf0) != 0) { + priv->gphy_rev = gphy_rev << 8; + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + } else if ((gphy_rev & 0xff00) != 0) { + priv->gphy_rev = gphy_rev; + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +struct bcmgenet_plat_data { + enum bcmgenet_version version; + u32 dma_max_burst_length; +}; + +static const struct bcmgenet_plat_data v1_plat_data = { + .version = GENET_V1, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v2_plat_data = { + .version = GENET_V2, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v3_plat_data = { + .version = GENET_V3, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v4_plat_data = { + .version = GENET_V4, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v5_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data bcm2711_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = 0x08, +}; + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, + { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, + { .compatible = "brcm,genet-v3", .data = &v3_plat_data }, + { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, + { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, + { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcmgenet_match); + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; + const struct bcmgenet_plat_data *pdata; + struct bcmgenet_priv *priv; + struct net_device *dev; + unsigned int i; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + if (priv->irq0 < 0) { + err = priv->irq0; + goto err; + } + priv->irq1 = platform_get_irq(pdev, 1); + if (priv->irq1 < 0) { + err = priv->irq1; + goto err; + } + priv->wol_irq = platform_get_irq_optional(pdev, 2); + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if 
(IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + spin_lock_init(&priv->lock); + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + dev->watchdog_timeo = 2 * HZ; + dev->ethtool_ops = &bcmgenet_ethtool_ops; + dev->netdev_ops = &bcmgenet_netdev_ops; + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set default features */ + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = true; + err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, + dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + netdev_boot_setup_check(dev); + + priv->dev = dev; + priv->pdev = pdev; + + pdata = device_get_match_data(&pdev->dev); + if (pdata) { + priv->version = pdata->version; + priv->dma_max_burst_length = pdata->dma_max_burst_length; + } else { + priv->version = pd->genet_version; + priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; + } + + priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) { + dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); + err = PTR_ERR(priv->clk); + goto err; + } + + err = clk_prepare_enable(priv->clk); + if (err) + goto err; + + bcmgenet_set_hw_params(priv); + + err = -EIO; + if (priv->hw_params->flags & GENET_HAS_40BITS) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_clk_disable; + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); + err = PTR_ERR(priv->clk_wol); + goto err_clk_disable; + } + + priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); + err = PTR_ERR(priv->clk_eee); + goto err_clk_disable; + } + + /* If this is an internal GPHY, power it on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + if (pd && !IS_ERR_OR_NULL(pd->mac_address)) + ether_addr_copy(dev->dev_addr, pd->mac_address); + else + if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN)) + if (has_acpi_companion(&pdev->dev)) + bcmgenet_get_hw_addr(priv, dev->dev_addr); + + if (!is_valid_ether_addr(dev->dev_addr)) { + dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + eth_hw_addr_random(dev); + } + + reset_umac(priv); + + err = bcmgenet_mii_init(dev); + if (err) + goto err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + /* Set default coalescing parameters */ + for (i = 0; i < 
priv->hw_params->rx_queues; i++) + priv->rx_rings[i].rx_max_coalesced_frames = 1; + priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; + + /* libphy will determine the link state */ + netif_carrier_off(dev); + + /* Turn off the main clock, WOL clock is handled separately */ + clk_disable_unprepare(priv->clk); + + err = register_netdev(dev); + if (err) { + bcmgenet_mii_exit(dev); + goto err; + } + + return err; + +err_clk_disable: + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + +static void bcmgenet_shutdown(struct platform_device *pdev) +{ + bcmgenet_remove(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int bcmgenet_resume_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + u32 reg; + + if (!netif_running(dev)) + return 0; + + /* Turn on the clock */ + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + if (device_may_wakeup(d) && priv->wolopts) { + /* Account for Wake-on-LAN events and clear those events + * (Some devices need more time between enabling the clocks + * and the interrupt register reflecting the wake event so + * read the register twice) + */ + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + if (reg & UMAC_IRQ_WAKE_EVENT) + pm_wakeup_event(&priv->pdev->dev, 0); + } + + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); + + return 0; +} + +static int bcmgenet_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + unsigned long dma_ctrl; + int ret; + + if (!netif_running(dev)) + return 0; + + /* From WOL-enabled suspend, switch to regular clock */ + if (device_may_wakeup(d) && priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + bcmgenet_umac_reset(priv); + + init_umac(priv); + + phy_init_hw(dev->phydev); + + /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); + bcmgenet_mii_config(priv->dev, false); + + /* Restore enabled features */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto out_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + if (!device_may_wakeup(d)) + phy_resume(dev->phydev); + + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); + + bcmgenet_netif_start(dev); + + netif_device_attach(dev); + + return 0; + +out_clk_disable: + if (priv->internal_phy) + 
bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + + bcmgenet_netif_stop(dev); + + if (!device_may_wakeup(d)) + phy_suspend(dev->phydev); + + /* Disable filtering */ + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); + + return 0; +} + +static int bcmgenet_suspend_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + if (!netif_running(dev)) + return 0; + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + else if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + /* Let the framework handle resumption and leave the clocks on */ + if (ret) + return ret; + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + return 0; +} +#else +#define bcmgenet_suspend NULL +#define bcmgenet_suspend_noirq NULL +#define bcmgenet_resume NULL +#define bcmgenet_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops bcmgenet_pm_ops = { + .suspend = bcmgenet_suspend, + .suspend_noirq = bcmgenet_suspend_noirq, + .resume = bcmgenet_resume, + .resume_noirq = bcmgenet_resume_noirq, +}; + +static const struct acpi_device_id genet_acpi_match[] = { + { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, genet_acpi_match); + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .shutdown = bcmgenet_shutdown, + .driver = { + .name = "bcmgenet", + .of_match_table = bcmgenet_match, + .pm = &bcmgenet_pm_ops, + .acpi_match_table = genet_acpi_match, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); +MODULE_ALIAS("platform:bcmgenet"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: mdio-bcm-unimac"); diff --git a/devices/genet/bcmgenet-5.14-orig.h b/devices/genet/bcmgenet-5.14-orig.h new file mode 100644 index 00000000..0a6d91b0 --- /dev/null +++ b/devices/genet/bcmgenet-5.14-orig.h @@ -0,0 +1,704 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2020 Broadcom + */ + +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../unimac.h" + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. 
configuration */ +#define MAX_NUM_OF_FS_RULES 16 +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. */ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0X7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 
ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; +}; + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT_V1 0x61C +#define RBUF_OVFL_CNT_V2 0x80 +#define RBUF_OVFL_CNT_V3PLUS 0x94 + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT_V1 0x634 +#define RBUF_ERR_CNT_V2 0x84 +#define RBUF_ERR_CNT_V3PLUS 0x98 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) +#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) +#define RBUF_SKIP_FCS (1 << 4) +#define RBUF_L3_PARSE_DIS (1 << 5) + +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) +#define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) 
+#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \ + UMAC_IRQ_MPD_R) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE + +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 << 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) +#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_IDDQ_GLBL_PWR (1 << 7) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) +#define EXT_PWR_DOWN_PHY_TX (1 << 16) +#define EXT_PWR_DOWN_PHY_RX (1 << 17) +#define EXT_PWR_DOWN_PHY_SD (1 << 18) +#define EXT_PWR_DOWN_PHY_RD (1 << 19) +#define EXT_PWR_DOWN_PHY_EN (1 << 20) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x01FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ 
+#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, + GENET_POWER_WOL_MAGIC, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4, + GENET_V5 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) +#define GENET_IS_V5(p) ((p)->version == GENET_V5) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR (1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 tx_bds_per_q; + u8 rx_queues; + u8 rx_bds_per_q; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 hfb_filter_size; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_skb_cb { + struct enet_cb *first_cb; /* First control block of SKB */ + struct enet_cb *last_cb; /* Last control block of SKB */ + unsigned int bytes_sent; /* bytes on the wire (no TSB) */ +}; + +#define 
GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb)) + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ + unsigned long packets; + unsigned long bytes; + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct dim dim; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned long bytes; + unsigned long packets; + unsigned long errors; + unsigned long dropped; + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + struct bcmgenet_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + void (*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; +}; + +enum bcmgenet_rxnfc_state { + BCMGENET_RXNFC_STATE_UNUSED = 0, + BCMGENET_RXNFC_STATE_DISABLED, + BCMGENET_RXNFC_STATE_ENABLED +}; + +struct bcmgenet_rxnfc_rule { + struct list_head list; + struct ethtool_rx_flow_spec fs; + enum bcmgenet_rxnfc_state state; +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + + /* transmit variables */ + void __iomem *tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES]; + struct list_head rxnfc_list; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + + /* MDIO bus variables */ + wait_queue_head_t wq; + bool internal_phy; + struct device_node *phy_dn; + struct device_node *mdio_dn; + struct mii_bus *mii_bus; + u16 gphy_rev; + struct clk *clk_eee; + bool clk_eee_enabled; + + /* PHY device variables */ + int old_link; + int old_speed; + int old_duplex; + int old_pause; + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + int wol_irq; + bool wol_irq_disabled; + + /* shared status */ + spinlock_t lock; + unsigned int irq0_stat; + + /* HW descriptors/checksum variables */ + bool crc_fwd_en; + + u32 dma_max_burst_length; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + struct platform_device *mii_pdev; + + /* WOL */ + 
struct clk *clk_wol; + u32 wolopts; + u8 sopass[SOPASS_MAX]; + bool wol_active; + + struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + /* MIPS chips strapped for BE will automagically configure the \ + * peripheral registers for CPU-native byte order. \ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_probe(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); +void bcmgenet_mii_setup(struct net_device *dev); + +/* Wake-on-LAN routines */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + +#endif /* __BCMGENET_H__ */ diff --git a/devices/genet/bcmgenet-6.1-ethercat.c b/devices/genet/bcmgenet-6.1-ethercat.c index 15d61dcb..e75925b1 100644 --- a/devices/genet/bcmgenet-6.1-ethercat.c +++ b/devices/genet/bcmgenet-6.1-ethercat.c @@ -1897,7 +1897,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, if (skb) { pkts_compl++; bytes_compl += GENET_CB(skb)->bytes_sent; - if (!priv->ecdev) + if (!get_ecdev(priv)) dev_consume_skb_any(skb); } @@ -1914,7 +1914,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, ring->packets += pkts_compl; ring->bytes += bytes_compl; - if (!priv->ecdev) + if (!get_ecdev(priv)) netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), pkts_compl, bytes_compl); @@ -1940,7 +1940,7 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) unsigned int work_done = 0; struct netdev_queue *txq; - if (ring->priv->ecdev) + if (get_ecdev(ring->priv)) return 0; spin_lock(&ring->lock); @@ -1992,6 +1992,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, /* If 64 byte status block enabled, must make sure skb has * enough headroom for us to insert 64B status block. 
*/ + BUG_ON(get_ecdev(priv)); new_skb = skb_realloc_headroom(skb, sizeof(*status)); if (!new_skb) { dev_kfree_skb_any(skb); @@ -2077,7 +2078,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock(&ring->lock); if (ring->free_bds <= (nr_frags + 1)) { - if (!priv->ecdev && !netif_tx_queue_stopped(txq)) { + if (!get_ecdev(priv) && !netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", @@ -2157,14 +2158,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) ring->prod_index += nr_frags + 1; ring->prod_index &= DMA_P_INDEX_MASK; - if (!priv->ecdev) { + if (!get_ecdev(priv)) { netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); } - if (priv->ecdev || !netdev_xmit_more() || netif_xmit_stopped(txq)) + if (get_ecdev(priv) || !netdev_xmit_more() || netif_xmit_stopped(txq)) /* Packets are ready, update producer index */ bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index, TDMA_PROD_INDEX); @@ -2183,7 +2184,7 @@ out_unmap_frags: bcmgenet_free_tx_cb(kdev, tx_cb_ptr); } - if (!priv->ecdev) + if (!get_ecdev(priv)) dev_kfree_skb(skb); goto out; } @@ -2249,7 +2250,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned int p_index, mask; unsigned int discards; - if (!priv->ecdev) { + if (!get_ecdev(priv)) { /* Clear status before servicing to reduce spurious interrupts */ if (ring->index == DESC_INDEX) { bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, @@ -2291,7 +2292,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, __be16 rx_csum; cb = &priv->rx_cbs[ring->read_ptr]; - if (priv->ecdev) + if (get_ecdev(priv)) /* DMA unmap current skb */ skb = bcmgenet_free_rx_cb(kdev, cb); else @@ -2327,7 +2328,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, netif_err(priv, rx_status, dev, "oversized packet\n"); dev->stats.rx_length_errors++; dev->stats.rx_errors++; - if (!priv->ecdev) + if (!get_ecdev(priv)) dev_kfree_skb_any(skb); goto next; } @@ -2336,7 +2337,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); ring->errors++; - if (!priv->ecdev) + if (!get_ecdev(priv)) dev_kfree_skb_any(skb); goto next; } @@ -2358,12 +2359,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (dma_flag & DMA_RX_LG) dev->stats.rx_length_errors++; dev->stats.rx_errors++; - if (!priv->ecdev) + if (!get_ecdev(priv)) dev_kfree_skb_any(skb); goto next; } /* error packet */ - if (!priv->ecdev) { + if (!get_ecdev(priv)) { skb_put(skb, len); /* remove RSB and hardware 2bytes added for IP alignment */ @@ -2372,7 +2373,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, len -= 66; if (priv->crc_fwd_en) { - if (!priv->ecdev) + if (!get_ecdev(priv)) skb_trim(skb, len - ETH_FCS_LEN); len -= ETH_FCS_LEN; } @@ -2386,12 +2387,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (dma_flag & DMA_RX_MULT) dev->stats.multicast++; - if (priv->ecdev) { + if (get_ecdev(priv)) { dma_addr_t mapping; /* skip status block and padding in skb */ const unsigned char *data = skb->data + 66; - ecdev_receive(priv->ecdev, data, len); + ecdev_receive(get_ecdev(priv), data, len); /* remap skb */ mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, @@ -2438,7 +2439,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int 
budget) struct dim_sample dim_sample = {}; unsigned int work_done; - if (ring->priv->ecdev) + if (get_ecdev(ring->priv)) return 0; work_done = bcmgenet_desc_rx(ring, budget); @@ -2652,7 +2653,7 @@ static void init_umac(struct bcmgenet_priv *priv) /* Enable MDIO interrupts on GENET v3+ */ if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) { int0_enable |= UMAC_IRQ_MDIO_ERROR; - if (!priv->ecdev) + if (!get_ecdev(priv)) int0_enable |= UMAC_IRQ_MDIO_DONE; } @@ -2682,7 +2683,7 @@ static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) usecs = ring->rx_coalesce_usecs; pkts = ring->rx_max_coalesced_frames; - if (ring->priv->ecdev) + if (get_ecdev(ring->priv)) return; /* If DIM was enabled, re-apply default parameters */ @@ -2751,7 +2752,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, DMA_END_ADDR); /* Initialize Tx NAPI */ - if (!priv->ecdev) + if (!get_ecdev(priv)) netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll); } @@ -2788,7 +2789,7 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, bcmgenet_init_rx_coalesce(ring); /* Initialize Rx NAPI */ - if (!priv->ecdev) + if (!get_ecdev(priv)) netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); @@ -2819,7 +2820,7 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_tx_ring *ring; - if (priv->ecdev) + if (get_ecdev(priv)) return; for (i = 0; i < priv->hw_params->tx_queues; ++i) { @@ -2838,7 +2839,7 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_tx_ring *ring; - if (priv->ecdev) + if (get_ecdev(priv)) return; for (i = 0; i < priv->hw_params->tx_queues; ++i) { @@ -2855,7 +2856,7 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_tx_ring *ring; - if (priv->ecdev) + if (get_ecdev(priv)) return; for (i = 0; i < priv->hw_params->tx_queues; ++i) { @@ -2941,7 +2942,7 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_rx_ring *ring; - if (priv->ecdev) + if (get_ecdev(priv)) return; for (i = 0; i < priv->hw_params->rx_queues; ++i) { @@ -2960,7 +2961,7 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_rx_ring *ring; - if (priv->ecdev) + if (get_ecdev(priv)) return; for (i = 0; i < priv->hw_params->rx_queues; ++i) { @@ -2979,7 +2980,7 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_rx_ring *ring; - if (priv->ecdev) + if (get_ecdev(priv)) return; for (i = 0; i < priv->hw_params->rx_queues; ++i) { @@ -3519,7 +3520,7 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_netif_start(dev); - if (!priv->ecdev) + if (!get_ecdev(priv)) netif_tx_start_all_queues(dev); return 0; @@ -4088,6 +4089,7 @@ static int bcmgenet_probe(struct platform_device *pdev) } priv = netdev_priv(dev); + priv->ecdev_initialized = false; priv->irq0 = platform_get_irq(pdev, 0); if (priv->irq0 < 0) { err = priv->irq0; @@ -4244,12 +4246,13 @@ static int bcmgenet_probe(struct platform_device *pdev) /* Turn off the main clock, WOL clock is handled separately */ clk_disable_unprepare(priv->clk); - priv->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); - if (priv->ecdev) { - err = ecdev_open(priv->ecdev); + priv->ecdev_ = ecdev_offer(dev, ec_poll, THIS_MODULE); + priv->ecdev_initialized = true; + if (get_ecdev(priv)) { + err = ecdev_open(get_ecdev(priv)); if (err) { - 
ecdev_withdraw(priv->ecdev); - priv->ecdev = NULL; + ecdev_withdraw(get_ecdev(priv)); + priv->ecdev_ = NULL; bcmgenet_mii_exit(dev); goto err; } @@ -4275,10 +4278,10 @@ static int bcmgenet_remove(struct platform_device *pdev) struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); dev_set_drvdata(&pdev->dev, NULL); - if (priv->ecdev) { - ecdev_close(priv->ecdev); - ecdev_withdraw(priv->ecdev); - priv->ecdev = NULL; + if (get_ecdev(priv)) { + ecdev_close(get_ecdev(priv)); + ecdev_withdraw(get_ecdev(priv)); + priv->ecdev_ = NULL; } else unregister_netdev(priv->dev); bcmgenet_mii_exit(priv->dev); @@ -4300,8 +4303,8 @@ static int bcmgenet_resume_noirq(struct device *d) int ret; u32 reg; - if (priv->ecdev) - ecdev_open(priv->ecdev); + if (get_ecdev(priv)) + ecdev_open(get_ecdev(priv)); else if (!netif_running(dev)) return 0; @@ -4387,8 +4390,8 @@ static int bcmgenet_resume(struct device *d) bcmgenet_netif_start(dev); - if (priv->ecdev) - ecdev_open(priv->ecdev); + if (get_ecdev(priv)) + ecdev_open(get_ecdev(priv)); else netif_device_attach(dev); @@ -4406,8 +4409,8 @@ static int bcmgenet_suspend(struct device *d) struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); - if (priv->ecdev) { - ecdev_close(priv->ecdev); + if (get_ecdev(priv)) { + ecdev_close(get_ecdev(priv)); } else { if (!netif_running(dev)) return 0; @@ -4432,8 +4435,8 @@ static int bcmgenet_suspend_noirq(struct device *d) struct bcmgenet_priv *priv = netdev_priv(dev); int ret = 0; - if (priv->ecdev) { - ecdev_close(priv->ecdev); + if (get_ecdev(priv)) { + ecdev_close(get_ecdev(priv)); } else if (!netif_running(dev)) return 0; diff --git a/devices/genet/bcmgenet-6.1-ethercat.h b/devices/genet/bcmgenet-6.1-ethercat.h index e624e4cf..aec31f42 100644 --- a/devices/genet/bcmgenet-6.1-ethercat.h +++ b/devices/genet/bcmgenet-6.1-ethercat.h @@ -650,9 +650,19 @@ struct bcmgenet_priv { struct ethtool_eee eee; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct bcmgenet_priv *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + + #define GENET_IO_MACRO(name, offset) \ static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ u32 off) \ diff --git a/devices/genet/bcmgenet_wol-5.10-ethercat.c b/devices/genet/bcmgenet_wol-5.10-ethercat.c new file mode 100644 index 00000000..aa3ae8aa --- /dev/null +++ b/devices/genet/bcmgenet_wol-5.10-ethercat.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet_wol: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet-5.10-ethercat.h" + +/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet + * Detection is supported through ethtool + */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; + 
memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); +} + +/* ethtool function - set WOL (Wake on LAN) settings. + * Only for magic packet detection mode. + */ +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) +{ + struct net_device *dev = priv->dev; + int retries = 0; + + while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) + & RBUF_STATUS_WOL)) { + retries++; + if (retries > 5) { + netdev_crit(dev, "polling wol mode timeout\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return retries; +} + +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + struct net_device *dev = priv->dev; + struct bcmgenet_rxnfc_rule *rule; + u32 reg, hfb_ctrl_reg, hfb_enable = 0; + int retries = 0; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + /* Can't suspend with WoL if MAC is still in reset */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + mdelay(10); + + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE) + hfb_enable |= (1 << rule->fs.location); + reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + return retries; + } + + netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", + retries); + + clk_prepare_enable(priv->clk_wol); + priv->wol_active = 1; + + if (hfb_enable) { + bcmgenet_hfb_reg_writel(priv, hfb_enable, + 
HFB_FLT_ENABLE_V3PLUS + 4); + hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + } + + /* Enable CRC forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = 1; + reg |= CMD_CRC_FWD; + + /* Receiver must be enabled for WOL MP detection */ + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + reg = UMAC_IRQ_MPD_R; + if (hfb_enable) + reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; + + bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR); + + return 0; +} + +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); + return; + } + + if (!priv->wol_active) + return; /* failed to suspend so skip the rest */ + + priv->wol_active = 0; + clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; + + /* Disable Magic Packet Detection */ + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + /* Disable WAKE_FILTER Detection */ + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} diff --git a/devices/genet/bcmgenet_wol-5.10-orig.c b/devices/genet/bcmgenet_wol-5.10-orig.c new file mode 100644 index 00000000..2c2a56d5 --- /dev/null +++ b/devices/genet/bcmgenet_wol-5.10-orig.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet_wol: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet + * Detection is supported through ethtool + */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); +} + +/* ethtool function - set WOL (Wake on LAN) settings. + * Only for magic packet detection mode. 
+ */ +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) +{ + struct net_device *dev = priv->dev; + int retries = 0; + + while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) + & RBUF_STATUS_WOL)) { + retries++; + if (retries > 5) { + netdev_crit(dev, "polling wol mode timeout\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return retries; +} + +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + struct net_device *dev = priv->dev; + struct bcmgenet_rxnfc_rule *rule; + u32 reg, hfb_ctrl_reg, hfb_enable = 0; + int retries = 0; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + /* Can't suspend with WoL if MAC is still in reset */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + mdelay(10); + + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE) + hfb_enable |= (1 << rule->fs.location); + reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + return retries; + } + + netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", + retries); + + clk_prepare_enable(priv->clk_wol); + priv->wol_active = 1; + + if (hfb_enable) { + bcmgenet_hfb_reg_writel(priv, hfb_enable, + HFB_FLT_ENABLE_V3PLUS + 4); + hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + } + + /* Enable CRC forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = 1; + reg |= 
CMD_CRC_FWD; + + /* Receiver must be enabled for WOL MP detection */ + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + reg = UMAC_IRQ_MPD_R; + if (hfb_enable) + reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; + + bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR); + + return 0; +} + +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); + return; + } + + if (!priv->wol_active) + return; /* failed to suspend so skip the rest */ + + priv->wol_active = 0; + clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; + + /* Disable Magic Packet Detection */ + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + /* Disable WAKE_FILTER Detection */ + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} diff --git a/devices/genet/bcmgenet_wol-5.14-ethercat.c b/devices/genet/bcmgenet_wol-5.14-ethercat.c new file mode 100644 index 00000000..5bb9ed6b --- /dev/null +++ b/devices/genet/bcmgenet_wol-5.14-ethercat.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet_wol: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet-5.14-ethercat.h" + +/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet + * Detection is supported through ethtool + */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); +} + +/* ethtool function - set WOL (Wake on LAN) settings. + * Only for magic packet detection mode. 
+ */ +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) +{ + struct net_device *dev = priv->dev; + int retries = 0; + + while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) + & RBUF_STATUS_WOL)) { + retries++; + if (retries > 5) { + netdev_crit(dev, "polling wol mode timeout\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return retries; +} + +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + struct net_device *dev = priv->dev; + struct bcmgenet_rxnfc_rule *rule; + u32 reg, hfb_ctrl_reg, hfb_enable = 0; + int retries = 0; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + /* Can't suspend with WoL if MAC is still in reset */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + mdelay(10); + + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE) + hfb_enable |= (1 << rule->fs.location); + reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + return retries; + } + + netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", + retries); + + clk_prepare_enable(priv->clk_wol); + priv->wol_active = 1; + + if (hfb_enable) { + bcmgenet_hfb_reg_writel(priv, hfb_enable, + HFB_FLT_ENABLE_V3PLUS + 4); + hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + } + + /* Enable CRC forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = 1; + reg |= 
CMD_CRC_FWD; + + /* Receiver must be enabled for WOL MP detection */ + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + reg = UMAC_IRQ_MPD_R; + if (hfb_enable) + reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; + + bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR); + + return 0; +} + +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); + return; + } + + if (!priv->wol_active) + return; /* failed to suspend so skip the rest */ + + priv->wol_active = 0; + clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; + + /* Disable Magic Packet Detection */ + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + /* Disable WAKE_FILTER Detection */ + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} diff --git a/devices/genet/bcmgenet_wol-5.14-orig.c b/devices/genet/bcmgenet_wol-5.14-orig.c new file mode 100644 index 00000000..e31a5a39 --- /dev/null +++ b/devices/genet/bcmgenet_wol-5.14-orig.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet_wol: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet + * Detection is supported through ethtool + */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); +} + +/* ethtool function - set WOL (Wake on LAN) settings. + * Only for magic packet detection mode. 
+ */ +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) +{ + struct net_device *dev = priv->dev; + int retries = 0; + + while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) + & RBUF_STATUS_WOL)) { + retries++; + if (retries > 5) { + netdev_crit(dev, "polling wol mode timeout\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return retries; +} + +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + struct net_device *dev = priv->dev; + struct bcmgenet_rxnfc_rule *rule; + u32 reg, hfb_ctrl_reg, hfb_enable = 0; + int retries = 0; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + /* Can't suspend with WoL if MAC is still in reset */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + mdelay(10); + + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE) + hfb_enable |= (1 << rule->fs.location); + reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + return retries; + } + + netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", + retries); + + clk_prepare_enable(priv->clk_wol); + priv->wol_active = 1; + + if (hfb_enable) { + bcmgenet_hfb_reg_writel(priv, hfb_enable, + HFB_FLT_ENABLE_V3PLUS + 4); + hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + } + + /* Enable CRC forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = 1; + reg |= 
CMD_CRC_FWD; + + /* Receiver must be enabled for WOL MP detection */ + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + reg = UMAC_IRQ_MPD_R; + if (hfb_enable) + reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; + + bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR); + + return 0; +} + +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); + return; + } + + if (!priv->wol_active) + return; /* failed to suspend so skip the rest */ + + priv->wol_active = 0; + clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; + + /* Disable Magic Packet Detection */ + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + /* Disable WAKE_FILTER Detection */ + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} diff --git a/devices/genet/bcmmii-5.10-ethercat.c b/devices/genet/bcmmii-5.10-ethercat.c new file mode 100644 index 00000000..1db7dab0 --- /dev/null +++ b/devices/genet/bcmmii-5.10-ethercat.c @@ -0,0 +1,637 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014-2017 Broadcom + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet-5.10-ethercat.h" + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 reg, cmd_bits = 0; + bool status_changed = false; + + if (priv->old_link != phydev->link) { + status_changed = true; + priv->old_link = phydev->link; + } + + if (phydev->link) { + /* check speed/duplex/pause changes */ + if (priv->old_speed != phydev->speed) { + status_changed = true; + priv->old_speed = phydev->speed; + } + + if (priv->old_duplex != phydev->duplex) { + status_changed = true; + priv->old_duplex = phydev->duplex; + } + + if (priv->old_pause != phydev->pause) { + status_changed = true; + priv->old_pause = phydev->pause; + } + + /* done if nothing has changed */ + if (!status_changed) + return; + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = UMAC_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = UMAC_SPEED_100; + else + cmd_bits = UMAC_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) + cmd_bits |= CMD_HD_EN; + + /* pause capability */ + if (!phydev->pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + + /* + * Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
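+ * If the MAC is still held in software reset, the reset bit is
+ * cleared and TX/RX are re-enabled before the command register is
+ * written back. In this EtherCAT variant the new link state is also
+ * reported to the master via ecdev_set_link() below.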
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } else { + /* done if nothing has changed */ + if (!status_changed) + return; + + /* needed for MoCA fixed PHY to reflect correct link status */ + netif_carrier_off(dev); + } + if (priv->ecdev) + ecdev_set_link(priv->ecdev, phydev->link); + + phy_print_status(phydev); +} + + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } + + return 0; +} + +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (GENET_IS_V4(priv)) { + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(60); + } else { + mdelay(1); + } +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + u32 reg; + + if (!GENET_IS_V5(priv)) { + /* Speed settings are set in bcmgenet_mii_setup() */ + reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); + reg |= LED_ACT_SOURCE_MAC; + bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + } + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->dev->phydev, + bcmgenet_fixed_phy_link_update); +} + +int bcmgenet_mii_config(struct net_device *dev, bool init) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_INTERNAL: + phy_name = "internal PHY"; + fallthrough; + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = PORT_MODE_INT_EPHY; + + if (!phy_name) { + phy_name = "MoCA"; + bcmgenet_moca_phy_setup(priv); + } + break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phy_set_max_speed(phydev, SPEED_100); + port_ctrl = PORT_MODE_EXT_EPHY; + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * 
capabilities, use that knowledge to also configure the + * Reverse MII interface correctly. + */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) + port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + phy_name = "external RGMII (no delay)"; + id_mode_dis = BIT(16); + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */ + phy_name = "external RGMII (TX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_RXID: + phy_name = "external RGMII (RX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + priv->ext_phy = !priv->internal_phy && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~ID_MODE_DIS; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + struct phy_device *phydev; + u32 phy_flags = 0; + int ret; + + /* Communicate the integrated PHY revision */ + if (priv->internal_phy) + phy_flags = priv->gphy_rev; + + /* Initialize link state variables that bcmgenet_mii_setup() uses */ + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + if (dn) { + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + if (has_acpi_companion(kdev)) { + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct mii_bus *unimacbus; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + unimacbus = mdio_find_bus(mdio_bus_id); + if (!unimacbus) { + pr_err("Unable to find mii\n"); + return -ENODEV; + } + phydev = phy_find_first(unimacbus); + put_device(&unimacbus->dev); + if (!phydev) { + pr_err("Unable to find PHY\n"); + return -ENODEV; + } + } else { + phydev = dev->phydev; + } + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. 
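+ * If bcmgenet_mii_config() fails here, the PHY connected above is
+ * disconnected again before the error is returned.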
+ */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(dev->phydev); + return ret; + } + + linkmode_copy(phydev->advertising, phydev->supported); + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs. On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. + */ + if (priv->internal_phy && !GENET_IS_V5(priv)) + dev->phydev->irq = PHY_IGNORE_INTERRUPT; + + return 0; +} + +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; + + priv->mdio_dn = of_get_compatible_child(dn, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; + } + + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; + } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + return 0; +} + +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pres) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. 
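+ * The controller is registered below as a child platform device of
+ * the GENET device so that the unimac-mdio driver can bind to it;
+ * bcmgenet_mii_wait() is passed in as its completion callback.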
+ */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) + return -ENOMEM; + + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + if (dn) + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + else if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + else + ppd.phy_mask = ~0; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; + + return 0; +out: + platform_device_put(ppdev); + return ret; +} + +static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int phy_mode = device_get_phy_mode(kdev); + + if (phy_mode < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return phy_mode; + } + + priv->phy_interface = phy_mode; + + /* We need to specifically look up whether this PHY interface is + * internal or not *before* we even try to probe the PHY driver + * over MDIO as we may have shut down the internal PHY for power + * saving purposes. + */ + if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + priv->internal_phy = true; + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + int ret; + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + /* Get the link mode */ + ret = bcmgenet_phy_interface_init(priv); + if (ret) + return ret; + + /* Make sure we initialize MoCA PHYs with a link down */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) { + phydev->link = 0; + put_device(&phydev->mdio.dev); + } + } + + return 0; +} + +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct phy_device *phydev; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + + /* + * Internal or external PHY with MDIO access + */ + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); + if (!phydev) { + dev_err(kdev, "failed to register PHY device\n"); + return -ENODEV; + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
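+ * The fixed PHY takes its speed and duplex from the platform data,
+ * is registered with polling and has its link forced down afterwards.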
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + + } + + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else if (has_acpi_companion(kdev)) + return bcmgenet_phy_interface_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_register(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_bus_init(priv); + if (ret) + goto out; + + return 0; + +out: + bcmgenet_mii_exit(dev); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); + of_node_put(priv->phy_dn); + platform_device_unregister(priv->mii_pdev); +} diff --git a/devices/genet/bcmmii-5.10-orig.c b/devices/genet/bcmmii-5.10-orig.c new file mode 100644 index 00000000..f9e91304 --- /dev/null +++ b/devices/genet/bcmmii-5.10-orig.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014-2017 Broadcom + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 reg, cmd_bits = 0; + bool status_changed = false; + + if (priv->old_link != phydev->link) { + status_changed = true; + priv->old_link = phydev->link; + } + + if (phydev->link) { + /* check speed/duplex/pause changes */ + if (priv->old_speed != phydev->speed) { + status_changed = true; + priv->old_speed = phydev->speed; + } + + if (priv->old_duplex != phydev->duplex) { + status_changed = true; + priv->old_duplex = phydev->duplex; + } + + if (priv->old_pause != phydev->pause) { + status_changed = true; + priv->old_pause = phydev->pause; + } + + /* done if nothing has changed */ + if (!status_changed) + return; + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = UMAC_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = UMAC_SPEED_100; + else + cmd_bits = UMAC_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) + cmd_bits |= CMD_HD_EN; + + /* pause capability */ + if (!phydev->pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + + /* + * Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } else { + /* done if nothing has changed */ + if (!status_changed) + return; + + /* needed for MoCA fixed PHY to reflect correct link status */ + netif_carrier_off(dev); + } + + phy_print_status(phydev); +} + + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } + + return 0; +} + +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (GENET_IS_V4(priv)) { + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(60); + } else { + mdelay(1); + } +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + u32 reg; + + if (!GENET_IS_V5(priv)) { + /* Speed settings are set in bcmgenet_mii_setup() */ + reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); + reg |= LED_ACT_SOURCE_MAC; + bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + } + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->dev->phydev, + bcmgenet_fixed_phy_link_update); +} + +int bcmgenet_mii_config(struct net_device *dev, bool init) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_INTERNAL: + phy_name = "internal PHY"; + fallthrough; + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = PORT_MODE_INT_EPHY; + + if (!phy_name) { + phy_name = "MoCA"; + bcmgenet_moca_phy_setup(priv); + } + break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phy_set_max_speed(phydev, SPEED_100); + port_ctrl = PORT_MODE_EXT_EPHY; + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * capabilities, use that knowledge to also configure the + * Reverse MII 
interface correctly. + */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) + port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + phy_name = "external RGMII (no delay)"; + id_mode_dis = BIT(16); + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */ + phy_name = "external RGMII (TX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_RXID: + phy_name = "external RGMII (RX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + priv->ext_phy = !priv->internal_phy && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~ID_MODE_DIS; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + struct phy_device *phydev; + u32 phy_flags = 0; + int ret; + + /* Communicate the integrated PHY revision */ + if (priv->internal_phy) + phy_flags = priv->gphy_rev; + + /* Initialize link state variables that bcmgenet_mii_setup() uses */ + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + if (dn) { + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + if (has_acpi_companion(kdev)) { + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct mii_bus *unimacbus; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + unimacbus = mdio_find_bus(mdio_bus_id); + if (!unimacbus) { + pr_err("Unable to find mii\n"); + return -ENODEV; + } + phydev = phy_find_first(unimacbus); + put_device(&unimacbus->dev); + if (!phydev) { + pr_err("Unable to find PHY\n"); + return -ENODEV; + } + } else { + phydev = dev->phydev; + } + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. + */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(dev->phydev); + return ret; + } + + linkmode_copy(phydev->advertising, phydev->supported); + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs. 
On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. + */ + if (priv->internal_phy && !GENET_IS_V5(priv)) + dev->phydev->irq = PHY_IGNORE_INTERRUPT; + + return 0; +} + +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; + + priv->mdio_dn = of_get_compatible_child(dn, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; + } + + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; + } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + return 0; +} + +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pres) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. 
+ */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) + return -ENOMEM; + + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + if (dn) + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + else if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + else + ppd.phy_mask = ~0; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; + + return 0; +out: + platform_device_put(ppdev); + return ret; +} + +static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int phy_mode = device_get_phy_mode(kdev); + + if (phy_mode < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return phy_mode; + } + + priv->phy_interface = phy_mode; + + /* We need to specifically look up whether this PHY interface is + * internal or not *before* we even try to probe the PHY driver + * over MDIO as we may have shut down the internal PHY for power + * saving purposes. + */ + if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + priv->internal_phy = true; + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + int ret; + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + /* Get the link mode */ + ret = bcmgenet_phy_interface_init(priv); + if (ret) + return ret; + + /* Make sure we initialize MoCA PHYs with a link down */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) { + phydev->link = 0; + put_device(&phydev->mdio.dev); + } + } + + return 0; +} + +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct phy_device *phydev; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + + /* + * Internal or external PHY with MDIO access + */ + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); + if (!phydev) { + dev_err(kdev, "failed to register PHY device\n"); + return -ENODEV; + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + + } + + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else if (has_acpi_companion(kdev)) + return bcmgenet_phy_interface_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_register(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_bus_init(priv); + if (ret) + goto out; + + return 0; + +out: + bcmgenet_mii_exit(dev); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); + of_node_put(priv->phy_dn); + platform_device_unregister(priv->mii_pdev); +} diff --git a/devices/genet/bcmmii-5.14-ethercat.c b/devices/genet/bcmmii-5.14-ethercat.c new file mode 100644 index 00000000..e84d7c6f --- /dev/null +++ b/devices/genet/bcmmii-5.14-ethercat.c @@ -0,0 +1,637 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014-2017 Broadcom + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet-5.14-ethercat.h" + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 reg, cmd_bits = 0; + bool status_changed = false; + + if (priv->old_link != phydev->link) { + status_changed = true; + priv->old_link = phydev->link; + } + + if (phydev->link) { + /* check speed/duplex/pause changes */ + if (priv->old_speed != phydev->speed) { + status_changed = true; + priv->old_speed = phydev->speed; + } + + if (priv->old_duplex != phydev->duplex) { + status_changed = true; + priv->old_duplex = phydev->duplex; + } + + if (priv->old_pause != phydev->pause) { + status_changed = true; + priv->old_pause = phydev->pause; + } + + /* done if nothing has changed */ + if (!status_changed) + return; + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = CMD_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = CMD_SPEED_100; + else + cmd_bits = CMD_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) + cmd_bits |= CMD_HD_EN; + + /* pause capability */ + if (!phydev->pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + + /* + * Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
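+ * The speed bits written here are the CMD_SPEED_* values selected
+ * above; the EtherCAT master is notified of the resulting link
+ * state through ecdev_set_link() at the end of this function.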
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } else { + /* done if nothing has changed */ + if (!status_changed) + return; + + /* needed for MoCA fixed PHY to reflect correct link status */ + netif_carrier_off(dev); + } + if (priv->ecdev) + ecdev_set_link(priv->ecdev, phydev->link); + + phy_print_status(phydev); +} + + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } + + return 0; +} + +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (GENET_IS_V4(priv)) { + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(60); + } else { + mdelay(1); + } +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + u32 reg; + + if (!GENET_IS_V5(priv)) { + /* Speed settings are set in bcmgenet_mii_setup() */ + reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); + reg |= LED_ACT_SOURCE_MAC; + bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + } + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->dev->phydev, + bcmgenet_fixed_phy_link_update); +} + +int bcmgenet_mii_config(struct net_device *dev, bool init) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_INTERNAL: + phy_name = "internal PHY"; + fallthrough; + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = PORT_MODE_INT_EPHY; + + if (!phy_name) { + phy_name = "MoCA"; + bcmgenet_moca_phy_setup(priv); + } + break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phy_set_max_speed(phydev, SPEED_100); + port_ctrl = PORT_MODE_EXT_EPHY; + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * 
capabilities, use that knowledge to also configure the + * Reverse MII interface correctly. + */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) + port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + phy_name = "external RGMII (no delay)"; + id_mode_dis = BIT(16); + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */ + phy_name = "external RGMII (TX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_RXID: + phy_name = "external RGMII (RX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + priv->ext_phy = !priv->internal_phy && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~ID_MODE_DIS; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + struct phy_device *phydev; + u32 phy_flags = 0; + int ret; + + /* Communicate the integrated PHY revision */ + if (priv->internal_phy) + phy_flags = priv->gphy_rev; + + /* Initialize link state variables that bcmgenet_mii_setup() uses */ + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + if (dn) { + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + if (has_acpi_companion(kdev)) { + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct mii_bus *unimacbus; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + unimacbus = mdio_find_bus(mdio_bus_id); + if (!unimacbus) { + pr_err("Unable to find mii\n"); + return -ENODEV; + } + phydev = phy_find_first(unimacbus); + put_device(&unimacbus->dev); + if (!phydev) { + pr_err("Unable to find PHY\n"); + return -ENODEV; + } + } else { + phydev = dev->phydev; + } + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. 
+ */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(dev->phydev); + return ret; + } + + linkmode_copy(phydev->advertising, phydev->supported); + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs. On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. + */ + if (priv->internal_phy && !GENET_IS_V5(priv)) + dev->phydev->irq = PHY_MAC_INTERRUPT; + + return 0; +} + +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; + + priv->mdio_dn = of_get_compatible_child(dn, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; + } + + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; + } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + return 0; +} + +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pres) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. 
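+ * The id of this child device is taken from the "eth" devicetree
+ * alias when a DT node is present, otherwise from the parent
+ * platform device id.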
+ */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) + return -ENOMEM; + + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + if (dn) + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + else if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + else + ppd.phy_mask = ~0; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; + + return 0; +out: + platform_device_put(ppdev); + return ret; +} + +static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int phy_mode = device_get_phy_mode(kdev); + + if (phy_mode < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return phy_mode; + } + + priv->phy_interface = phy_mode; + + /* We need to specifically look up whether this PHY interface is + * internal or not *before* we even try to probe the PHY driver + * over MDIO as we may have shut down the internal PHY for power + * saving purposes. + */ + if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + priv->internal_phy = true; + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + int ret; + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + /* Get the link mode */ + ret = bcmgenet_phy_interface_init(priv); + if (ret) + return ret; + + /* Make sure we initialize MoCA PHYs with a link down */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) { + phydev->link = 0; + put_device(&phydev->mdio.dev); + } + } + + return 0; +} + +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct phy_device *phydev; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + + /* + * Internal or external PHY with MDIO access + */ + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); + if (!phydev) { + dev_err(kdev, "failed to register PHY device\n"); + return -ENODEV; + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + + } + + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else if (has_acpi_companion(kdev)) + return bcmgenet_phy_interface_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_register(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_bus_init(priv); + if (ret) + goto out; + + return 0; + +out: + bcmgenet_mii_exit(dev); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); + of_node_put(priv->phy_dn); + platform_device_unregister(priv->mii_pdev); +} diff --git a/devices/genet/bcmmii-5.14-orig.c b/devices/genet/bcmmii-5.14-orig.c new file mode 100644 index 00000000..89d16c58 --- /dev/null +++ b/devices/genet/bcmmii-5.14-orig.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014-2017 Broadcom + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 reg, cmd_bits = 0; + bool status_changed = false; + + if (priv->old_link != phydev->link) { + status_changed = true; + priv->old_link = phydev->link; + } + + if (phydev->link) { + /* check speed/duplex/pause changes */ + if (priv->old_speed != phydev->speed) { + status_changed = true; + priv->old_speed = phydev->speed; + } + + if (priv->old_duplex != phydev->duplex) { + status_changed = true; + priv->old_duplex = phydev->duplex; + } + + if (priv->old_pause != phydev->pause) { + status_changed = true; + priv->old_pause = phydev->pause; + } + + /* done if nothing has changed */ + if (!status_changed) + return; + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = CMD_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = CMD_SPEED_100; + else + cmd_bits = CMD_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) + cmd_bits |= CMD_HD_EN; + + /* pause capability */ + if (!phydev->pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + + /* + * Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } else { + /* done if nothing has changed */ + if (!status_changed) + return; + + /* needed for MoCA fixed PHY to reflect correct link status */ + netif_carrier_off(dev); + } + + phy_print_status(phydev); +} + + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } + + return 0; +} + +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (GENET_IS_V4(priv)) { + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(60); + } else { + mdelay(1); + } +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + u32 reg; + + if (!GENET_IS_V5(priv)) { + /* Speed settings are set in bcmgenet_mii_setup() */ + reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); + reg |= LED_ACT_SOURCE_MAC; + bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + } + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->dev->phydev, + bcmgenet_fixed_phy_link_update); +} + +int bcmgenet_mii_config(struct net_device *dev, bool init) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_INTERNAL: + phy_name = "internal PHY"; + fallthrough; + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = PORT_MODE_INT_EPHY; + + if (!phy_name) { + phy_name = "MoCA"; + bcmgenet_moca_phy_setup(priv); + } + break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phy_set_max_speed(phydev, SPEED_100); + port_ctrl = PORT_MODE_EXT_EPHY; + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * capabilities, use that knowledge to also configure the + * Reverse MII 
interface correctly. + */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) + port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + phy_name = "external RGMII (no delay)"; + id_mode_dis = BIT(16); + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */ + phy_name = "external RGMII (TX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_RXID: + phy_name = "external RGMII (RX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + priv->ext_phy = !priv->internal_phy && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~ID_MODE_DIS; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + struct phy_device *phydev; + u32 phy_flags = 0; + int ret; + + /* Communicate the integrated PHY revision */ + if (priv->internal_phy) + phy_flags = priv->gphy_rev; + + /* Initialize link state variables that bcmgenet_mii_setup() uses */ + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + if (dn) { + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + if (has_acpi_companion(kdev)) { + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct mii_bus *unimacbus; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + unimacbus = mdio_find_bus(mdio_bus_id); + if (!unimacbus) { + pr_err("Unable to find mii\n"); + return -ENODEV; + } + phydev = phy_find_first(unimacbus); + put_device(&unimacbus->dev); + if (!phydev) { + pr_err("Unable to find PHY\n"); + return -ENODEV; + } + } else { + phydev = dev->phydev; + } + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. + */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(dev->phydev); + return ret; + } + + linkmode_copy(phydev->advertising, phydev->supported); + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs. 
On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. + */ + if (priv->internal_phy && !GENET_IS_V5(priv)) + dev->phydev->irq = PHY_MAC_INTERRUPT; + + return 0; +} + +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; + + priv->mdio_dn = of_get_compatible_child(dn, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; + } + + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; + } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + return 0; +} + +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pres) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. 
+ */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) + return -ENOMEM; + + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + if (dn) + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + else if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + else + ppd.phy_mask = ~0; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; + + return 0; +out: + platform_device_put(ppdev); + return ret; +} + +static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int phy_mode = device_get_phy_mode(kdev); + + if (phy_mode < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return phy_mode; + } + + priv->phy_interface = phy_mode; + + /* We need to specifically look up whether this PHY interface is + * internal or not *before* we even try to probe the PHY driver + * over MDIO as we may have shut down the internal PHY for power + * saving purposes. + */ + if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + priv->internal_phy = true; + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + int ret; + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + /* Get the link mode */ + ret = bcmgenet_phy_interface_init(priv); + if (ret) + return ret; + + /* Make sure we initialize MoCA PHYs with a link down */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) { + phydev->link = 0; + put_device(&phydev->mdio.dev); + } + } + + return 0; +} + +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct phy_device *phydev; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + + /* + * Internal or external PHY with MDIO access + */ + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); + if (!phydev) { + dev_err(kdev, "failed to register PHY device\n"); + return -ENODEV; + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + + } + + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else if (has_acpi_companion(kdev)) + return bcmgenet_phy_interface_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_register(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_bus_init(priv); + if (ret) + goto out; + + return 0; + +out: + bcmgenet_mii_exit(dev); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); + of_node_put(priv->phy_dn); + platform_device_unregister(priv->mii_pdev); +} diff --git a/devices/genet/bcmmii-6.1-ethercat.c b/devices/genet/bcmmii-6.1-ethercat.c index 36bc8a1e..b8ddbaa8 100644 --- a/devices/genet/bcmmii-6.1-ethercat.c +++ b/devices/genet/bcmmii-6.1-ethercat.c @@ -105,8 +105,8 @@ void bcmgenet_mii_setup(struct net_device *dev) if (phydev->link) bcmgenet_mac_config(dev); - if (priv->ecdev) - ecdev_set_link(priv->ecdev, phydev->link); + if (get_ecdev(priv)) + ecdev_set_link(get_ecdev(priv), phydev->link); phy_print_status(phydev); } diff --git a/devices/genet/unimac-5.14-ethercat.h b/devices/genet/unimac-5.14-ethercat.h new file mode 100644 index 00000000..585a8528 --- /dev/null +++ b/devices/genet/unimac-5.14-ethercat.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __UNIMAC_H +#define __UNIMAC_H + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define CMD_SPEED_10 0 +#define CMD_SPEED_100 1 +#define CMD_SPEED_1000 2 +#define CMD_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET_OLD (1 << 11) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_RX_ERR_DISC (1 << 26) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) +#define UMAC_MAC0 0x00c +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_PAUSE_QUANTA 0x018 +#define UMAC_MODE 0x044 +#define MODE_LINK_STATUS (1 << 5) +#define UMAC_FRM_TAG0 0x048 /* outer tag */ +#define UMAC_FRM_TAG1 0x04c /* inner tag */ +#define UMAC_TX_IPG_LEN 0x05c +#define UMAC_EEE_CTRL 0x064 +#define 
EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff +#define UMAC_RX_IPG_INV 0x078 +#define UMAC_MACSEC_PROG_TX_CRC 0x310 +#define UMAC_MACSEC_CTRL 0x314 +#define UMAC_PAUSE_CTRL 0x330 +#define UMAC_TX_FLUSH 0x334 +#define UMAC_RX_FIFO_STATUS 0x338 +#define UMAC_TX_FIFO_STATUS 0x33c + +#endif diff --git a/devices/genet/unimac-5.14-orig.h b/devices/genet/unimac-5.14-orig.h new file mode 100644 index 00000000..585a8528 --- /dev/null +++ b/devices/genet/unimac-5.14-orig.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __UNIMAC_H +#define __UNIMAC_H + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define CMD_SPEED_10 0 +#define CMD_SPEED_100 1 +#define CMD_SPEED_1000 2 +#define CMD_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET_OLD (1 << 11) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_RX_ERR_DISC (1 << 26) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) +#define UMAC_MAC0 0x00c +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_PAUSE_QUANTA 0x018 +#define UMAC_MODE 0x044 +#define MODE_LINK_STATUS (1 << 5) +#define UMAC_FRM_TAG0 0x048 /* outer tag */ +#define UMAC_FRM_TAG1 0x04c /* inner tag */ +#define UMAC_TX_IPG_LEN 0x05c +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff +#define UMAC_RX_IPG_INV 0x078 +#define UMAC_MACSEC_PROG_TX_CRC 0x310 +#define UMAC_MACSEC_CTRL 0x314 +#define UMAC_PAUSE_CTRL 0x330 +#define UMAC_TX_FLUSH 0x334 +#define UMAC_RX_FIFO_STATUS 0x338 +#define UMAC_TX_FIFO_STATUS 0x33c + +#endif diff --git a/devices/genet/update.sh b/devices/genet/update.sh new file mode 100755 index 00000000..94f2acec --- /dev/null +++ b/devices/genet/update.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +if [ $# -ne 3 ]; then + echo "Need 3 arguments: 1) kernel source dir, 2) previous version, 3) version to add" + exit 1 +fi + +KERNELDIR=$1 +PREVER=$2 +KERNELVER=$3 + +GENETDIR=drivers/net/ethernet/broadcom/genet + +FILES="bcmgenet.c bcmgenet.h bcmgenet_wol.c bcmmii.c" + +for f in $FILES; do + echo $f + o=${f/\./-$KERNELVER-orig.} + e=${f/\./-$KERNELVER-ethercat.} + cp -v $KERNELDIR/$GENETDIR/$f $o + chmod 644 $o + cp -v $o $e + 
op=${f/\./-$PREVER-orig.} + ep=${f/\./-$PREVER-ethercat.} + diff -up $op $ep | patch -p1 --no-backup-if-mismatch $e + sed -i s/$PREVER-ethercat.h/$KERNELVER-ethercat.h/ $e + git add $o $e + echo -e "\t$e \\" >> Makefile.am + echo -e "\t$o \\" >> Makefile.am +done + +cp $KERNELDIR/$GENETDIR/../unimac.h unimac-$KERNELVER-orig.h +cp $KERNELDIR/$GENETDIR/../unimac.h unimac-$KERNELVER-ethercat.h +git add unimac-$KERNELVER-orig.h unimac-$KERNELVER-ethercat.h +echo -e "\tunimac-$KERNELVER-ethercat.h \\" >> Makefile.am +echo -e "\tunimac-$KERNELVER-orig.h \\" >> Makefile.am + +echo "Don't forget to update Makefile.am!" diff --git a/devices/igb/Kbuild.in b/devices/igb/Kbuild.in index 65c1e58d..3be67397 100644 --- a/devices/igb/Kbuild.in +++ b/devices/igb/Kbuild.in @@ -37,6 +37,10 @@ REV := $(shell if test -s $(TOPDIR)/revision; then \ ifeq (@ENABLE_IGB@,1) obj-m += ec_igb.o +ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y := -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING +endif + ec_igb-objs := \ e1000_82575-@KERNEL_IGB@-ethercat.o \ e1000_i210-@KERNEL_IGB@-ethercat.o \ diff --git a/devices/igb/igb-5.14-ethercat.h b/devices/igb/igb-5.14-ethercat.h index 59392816..4170d7d4 100644 --- a/devices/igb/igb-5.14-ethercat.h +++ b/devices/igb/igb-5.14-ethercat.h @@ -670,11 +670,20 @@ struct igb_adapter { struct vf_mac_filter *vf_mac_list; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct igb_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + /* flags controlling PTP/1588 function */ #define IGB_PTP_ENABLED BIT(0) #define IGB_PTP_OVERFLOW_CHECK BIT(1) diff --git a/devices/igb/igb-5.15-ethercat.h b/devices/igb/igb-5.15-ethercat.h index 02c798fe..b28929e8 100644 --- a/devices/igb/igb-5.15-ethercat.h +++ b/devices/igb/igb-5.15-ethercat.h @@ -670,11 +670,20 @@ struct igb_adapter { struct vf_mac_filter *vf_mac_list; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct igb_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + /* flags controlling PTP/1588 function */ #define IGB_PTP_ENABLED BIT(0) #define IGB_PTP_OVERFLOW_CHECK BIT(1) diff --git a/devices/igb/igb-6.1-ethercat.h b/devices/igb/igb-6.1-ethercat.h index dbdf917d..8ed16222 100644 --- a/devices/igb/igb-6.1-ethercat.h +++ b/devices/igb/igb-6.1-ethercat.h @@ -672,11 +672,20 @@ struct igb_adapter { spinlock_t vfs_lock; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct igb_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + /* flags controlling PTP/1588 function */ #define IGB_PTP_ENABLED BIT(0) #define IGB_PTP_OVERFLOW_CHECK BIT(1) diff --git a/devices/igb/igb_main-3.18-ethercat.c b/devices/igb/igb_main-3.18-ethercat.c index 36f7bda5..44c41fa4 100644 --- a/devices/igb/igb_main-3.18-ethercat.c +++ b/devices/igb/igb_main-3.18-ethercat.c @@ -1047,10 +1047,7 @@ static void 
igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) { - netif_napi_del(&q_vector->napi); - } - + netif_napi_del(&q_vector->napi); } static void igb_reset_interrupt_capability(struct igb_adapter *adapter) @@ -1221,11 +1218,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, if (!q_vector) return -ENOMEM; - if (!adapter->ecdev) { - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); - } + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -6430,6 +6425,9 @@ static int igb_poll(struct napi_struct *napi, int budget) napi); bool clean_complete = true; + if (q_vector->adapter->ecdev) + return -EBUSY; + #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); diff --git a/devices/igb/igb_main-4.19-ethercat.c b/devices/igb/igb_main-4.19-ethercat.c index a4169472..d49ae92e 100644 --- a/devices/igb/igb_main-4.19-ethercat.c +++ b/devices/igb/igb_main-4.19-ethercat.c @@ -1037,10 +1037,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) { - netif_napi_del(&q_vector->napi); - } - + netif_napi_del(&q_vector->napi); } static void igb_reset_interrupt_capability(struct igb_adapter *adapter) @@ -1219,11 +1216,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, if (!q_vector) return -ENOMEM; - if (!adapter->ecdev) { - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); - } + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -7858,6 +7853,9 @@ static int igb_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (q_vector->adapter->ecdev) + return -EBUSY; + #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); diff --git a/devices/igb/igb_main-4.4-ethercat.c b/devices/igb/igb_main-4.4-ethercat.c index 0d89fde9..c56686fa 100644 --- a/devices/igb/igb_main-4.4-ethercat.c +++ b/devices/igb/igb_main-4.4-ethercat.c @@ -1047,10 +1047,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) { - netif_napi_del(&q_vector->napi); - } - + netif_napi_del(&q_vector->napi); } static void igb_reset_interrupt_capability(struct igb_adapter *adapter) @@ -1227,11 +1224,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, if (!q_vector) return -ENOMEM; - if (!adapter->ecdev) { - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); - } + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -6506,6 +6501,9 @@ static int igb_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (q_vector->adapter->ecdev) + return -EBUSY; + #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); diff --git a/devices/igb/igb_main-5.10-ethercat.c b/devices/igb/igb_main-5.10-ethercat.c index 
719a03d4..8443832c 100644 --- a/devices/igb/igb_main-5.10-ethercat.c +++ b/devices/igb/igb_main-5.10-ethercat.c @@ -1036,9 +1036,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) - netif_napi_del(&q_vector->napi); - + netif_napi_del(&q_vector->napi); } static void igb_reset_interrupt_capability(struct igb_adapter *adapter) @@ -1218,10 +1216,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, return -ENOMEM; /* initialize NAPI */ - if (!adapter->ecdev) { - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); - } + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -6491,7 +6487,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - if (unlikely(!adapter->ecdev && + if (unlikely(!adapter->ecdev && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && @@ -8133,6 +8129,10 @@ static int igb_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (q_vector->adapter->ecdev) { + return -EBUSY; + } + #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); @@ -8274,7 +8274,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; - if (!adapter->ecdev && + if (!adapter->ecdev && test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; @@ -8926,7 +8926,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) total_packets++; continue; } - + /* verify the packet layout is correct */ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; diff --git a/devices/igb/igb_main-5.14-ethercat.c b/devices/igb/igb_main-5.14-ethercat.c index 483642c6..5a12a120 100644 --- a/devices/igb/igb_main-5.14-ethercat.c +++ b/devices/igb/igb_main-5.14-ethercat.c @@ -946,7 +946,7 @@ static int igb_request_msix(struct igb_adapter *adapter) struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return 0; } @@ -1047,8 +1047,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) - netif_napi_del(&q_vector->napi); + netif_napi_del(&q_vector->napi); } @@ -1229,10 +1228,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, return -ENOMEM; /* initialize NAPI */ - if (!adapter->ecdev) { - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); - } + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -1450,7 +1447,7 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_assign_vector(adapter->q_vector[0], 0); - if (!adapter->ecdev && adapter->flags & IGB_FLAG_HAS_MSI) { + if (!get_ecdev(adapter) && adapter->flags & IGB_FLAG_HAS_MSI) { err = request_irq(pdev->irq, igb_intr_msi, 0, netdev->name, adapter); if (!err) @@ -1461,7 +1458,7 @@ static int igb_request_irq(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_HAS_MSI; } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { err = 
request_irq(pdev->irq, igb_intr, IRQF_SHARED, netdev->name, adapter); @@ -1476,7 +1473,7 @@ request_done: static void igb_free_irq(struct igb_adapter *adapter) { - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->flags & IGB_FLAG_HAS_MSIX) { int vector = 0, i; @@ -1516,7 +1513,7 @@ static void igb_irq_disable(struct igb_adapter *adapter) wr32(E1000_IMC, ~0); wrfl(); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* skip synchronizing IRQs */ return; } @@ -1538,7 +1535,7 @@ static void igb_irq_enable(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->flags & IGB_FLAG_HAS_MSIX) { @@ -2156,7 +2153,7 @@ int igb_up(struct igb_adapter *adapter) clear_bit(__IGB_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); } @@ -2179,7 +2176,7 @@ int igb_up(struct igb_adapter *adapter) wr32(E1000_CTRL_EXT, reg_data); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. */ @@ -2213,7 +2210,7 @@ void igb_down(struct igb_adapter *adapter) igb_nfc_filter_exit(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -2231,7 +2228,7 @@ void igb_down(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - if (!adapter->ecdev && adapter->q_vector[i]) { + if (!get_ecdev(adapter) && adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); napi_disable(&adapter->q_vector[i]->napi); } @@ -2454,7 +2451,7 @@ void igb_reset(struct igb_adapter *adapter) break; } } - if (!adapter->ecdev && !netif_running(adapter->netdev)) + if (!get_ecdev(adapter) && !netif_running(adapter->netdev)) igb_power_down_link(adapter); igb_update_mng_vlan(adapter); @@ -2918,7 +2915,7 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct igb_adapter *adapter = netdev_priv(dev); - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; @@ -2987,7 +2984,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n, int nxmit = 0; int i; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) @@ -3289,6 +3286,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; + adapter->ecdev_initialized = 0; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); @@ -3438,7 +3436,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "NVM Read Error\n"); } +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, hw->mac.addr); +#else memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); +#endif if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); @@ -3572,12 +3574,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igb_get_hw_control(adapter); - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = 1; + if (get_ecdev(adapter)) { 
init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } adapter->ec_watchdog_jiffies = jiffies; @@ -3877,10 +3880,10 @@ static void igb_remove(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); irq_work_sync(&adapter->ec_watchdog_kicker); - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); } pm_runtime_get_noresume(&pdev->dev); @@ -3917,7 +3920,7 @@ static void igb_remove(struct pci_dev *pdev) igb_disable_sriov(pdev); #endif - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { unregister_netdev(netdev); } @@ -4160,8 +4163,8 @@ static int __igb_open(struct net_device *netdev, bool resuming) if (!resuming) pm_runtime_get_sync(&pdev->dev); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); } @@ -4189,7 +4192,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); @@ -4205,7 +4208,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); } @@ -4224,14 +4227,14 @@ static int __igb_open(struct net_device *netdev, bool resuming) wr32(E1000_CTRL_EXT, reg_data); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(netdev); } if (!resuming) pm_runtime_put(&pdev->dev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* start the watchdog. 
*/ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -4480,7 +4483,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - if (!adapter->ecdev) + if (!get_ecdev(adapter)) rx_ring->xdp_prog = adapter->xdp_prog; /* XDP RX-queue info */ @@ -4791,7 +4794,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, int reg_idx = ring->reg_idx; u32 rxdctl = 0; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL)); @@ -4931,7 +4934,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) /* Free all the Tx ring sk_buffs */ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(tx_buffer->skb); } @@ -5108,7 +5111,11 @@ static int igb_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, addr->sa_data); +#else memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); +#endif memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ @@ -5541,13 +5548,13 @@ static void igb_watchdog_task(struct work_struct *work) u32 connsw; u16 phy_data, retry_count = 20; - if (adapter->ecdev) + if (get_ecdev(adapter)) hw->mac.get_link_status = true; link = igb_has_link(adapter); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, link); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), link); return; } @@ -6198,7 +6205,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) struct net_device *netdev = tx_ring->netdev; struct igb_adapter *adapter = netdev_priv(netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_stop_subqueue(netdev, tx_ring->queue_index); } @@ -6215,7 +6222,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) return -EBUSY; /* A reprieve! 
*/ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_wake_subqueue(netdev, tx_ring->queue_index); } @@ -6371,7 +6378,7 @@ dma_error: DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; } @@ -6489,7 +6496,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - if (unlikely(!adapter->ecdev && + if (unlikely(!get_ecdev(adapter) && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && @@ -6528,12 +6535,12 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(first->skb); first->skb = NULL; } cleanup_tx_tstamp: - if (unlikely(!adapter->ecdev && tx_flags & IGB_TX_FLAGS_TSTAMP)) { + if (unlikely(!get_ecdev(adapter) && tx_flags & IGB_TX_FLAGS_TSTAMP)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; if (adapter->hw.mac.type == e1000_82576) @@ -8131,6 +8138,10 @@ static int igb_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (get_ecdev(q_vector->adapter)) { + return -EBUSY; + } + #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); @@ -8205,7 +8216,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) total_packets += tx_buffer->gso_segs; /* free the skb */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (tx_buffer->type == IGB_TYPE_SKB) napi_consume_skb(tx_buffer->skb, napi_budget); else @@ -8259,7 +8270,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) budget--; } while (likely(budget)); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); } @@ -8272,7 +8283,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; - if (!adapter->ecdev && + if (!get_ecdev(adapter) && test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; @@ -8316,7 +8327,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(!adapter->ecdev && total_packets && + if (unlikely(!get_ecdev(adapter) && total_packets && netif_carrier_ok(tx_ring->netdev) && igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this @@ -8858,9 +8869,9 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); - ecdev_receive(adapter->ecdev, pktbuf, size); + ecdev_receive(get_ecdev(adapter), pktbuf, size); adapter->ec_watchdog_jiffies = jiffies; igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -8923,7 +8934,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (igb_is_non_eop(rx_ring, rx_desc)) continue; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { total_packets++; continue; } diff --git a/devices/igb/igb_main-5.15-ethercat.c b/devices/igb/igb_main-5.15-ethercat.c index 4d3703a5..a849cc57 100644 --- 
a/devices/igb/igb_main-5.15-ethercat.c +++ b/devices/igb/igb_main-5.15-ethercat.c @@ -935,7 +935,7 @@ static int igb_request_msix(struct igb_adapter *adapter) struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return 0; } @@ -1036,8 +1036,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) - netif_napi_del(&q_vector->napi); + netif_napi_del(&q_vector->napi); } @@ -1218,10 +1217,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, return -ENOMEM; /* initialize NAPI */ - if (!adapter->ecdev) { - netif_napi_add(adapter->netdev, &q_vector->napi, - igb_poll, 64); - } + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -1439,7 +1436,7 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_assign_vector(adapter->q_vector[0], 0); - if (!adapter->ecdev && adapter->flags & IGB_FLAG_HAS_MSI) { + if (!get_ecdev(adapter) && adapter->flags & IGB_FLAG_HAS_MSI) { err = request_irq(pdev->irq, igb_intr_msi, 0, netdev->name, adapter); if (!err) @@ -1450,7 +1447,7 @@ static int igb_request_irq(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_HAS_MSI; } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, netdev->name, adapter); @@ -1465,7 +1462,7 @@ request_done: static void igb_free_irq(struct igb_adapter *adapter) { - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->flags & IGB_FLAG_HAS_MSIX) { int vector = 0, i; @@ -1505,7 +1502,7 @@ static void igb_irq_disable(struct igb_adapter *adapter) wr32(E1000_IMC, ~0); wrfl(); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* skip synchronizing IRQs */ return; } @@ -1527,7 +1524,7 @@ static void igb_irq_enable(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->flags & IGB_FLAG_HAS_MSIX) { @@ -2145,7 +2142,7 @@ int igb_up(struct igb_adapter *adapter) clear_bit(__IGB_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); } @@ -2168,7 +2165,7 @@ int igb_up(struct igb_adapter *adapter) wr32(E1000_CTRL_EXT, reg_data); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. 
*/ @@ -2202,7 +2199,7 @@ void igb_down(struct igb_adapter *adapter) igb_nfc_filter_exit(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -2220,7 +2217,7 @@ void igb_down(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - if (!adapter->ecdev && adapter->q_vector[i]) { + if (!get_ecdev(adapter) && adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); napi_disable(&adapter->q_vector[i]->napi); } @@ -2443,7 +2440,7 @@ void igb_reset(struct igb_adapter *adapter) break; } } - if (!adapter->ecdev && !netif_running(adapter->netdev)) + if (!get_ecdev(adapter) && !netif_running(adapter->netdev)) igb_power_down_link(adapter); igb_update_mng_vlan(adapter); @@ -2907,7 +2904,7 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct igb_adapter *adapter = netdev_priv(dev); - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; @@ -2976,7 +2973,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n, int nxmit = 0; int i; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) @@ -3278,6 +3275,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; + adapter->ecdev_initialized = 0; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); @@ -3561,12 +3559,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igb_get_hw_control(adapter); - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = 1; + if (get_ecdev(adapter)) { init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } adapter->ec_watchdog_jiffies = jiffies; @@ -3866,10 +3865,10 @@ static void igb_remove(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); irq_work_sync(&adapter->ec_watchdog_kicker); - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); } pm_runtime_get_noresume(&pdev->dev); @@ -3906,7 +3905,7 @@ static void igb_remove(struct pci_dev *pdev) igb_disable_sriov(pdev); #endif - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { unregister_netdev(netdev); } @@ -4149,8 +4148,8 @@ static int __igb_open(struct net_device *netdev, bool resuming) if (!resuming) pm_runtime_get_sync(&pdev->dev); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); } @@ -4178,7 +4177,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* Notify the stack of the actual queue counts. 
*/ err = netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); @@ -4194,7 +4193,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); } @@ -4213,14 +4212,14 @@ static int __igb_open(struct net_device *netdev, bool resuming) wr32(E1000_CTRL_EXT, reg_data); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(netdev); } if (!resuming) pm_runtime_put(&pdev->dev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -4450,7 +4449,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) struct device *dev = rx_ring->dev; int size, res; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* XDP RX-queue info */ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); @@ -4482,7 +4481,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - if (!adapter->ecdev) + if (!get_ecdev(adapter)) rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -4789,7 +4788,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, int reg_idx = ring->reg_idx; u32 rxdctl = 0; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL)); @@ -4929,7 +4928,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) /* Free all the Tx ring sk_buffs */ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(tx_buffer->skb); } @@ -5539,13 +5538,13 @@ static void igb_watchdog_task(struct work_struct *work) u32 connsw; u16 phy_data, retry_count = 20; - if (adapter->ecdev) + if (get_ecdev(adapter)) hw->mac.get_link_status = true; link = igb_has_link(adapter); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, link); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), link); return; } @@ -6196,7 +6195,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) struct net_device *netdev = tx_ring->netdev; struct igb_adapter *adapter = netdev_priv(netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_stop_subqueue(netdev, tx_ring->queue_index); } @@ -6213,7 +6212,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) return -EBUSY; /* A reprieve! 
*/ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_wake_subqueue(netdev, tx_ring->queue_index); } @@ -6369,7 +6368,7 @@ dma_error: DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; } @@ -6487,7 +6486,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - if (unlikely(!adapter->ecdev && + if (unlikely(!get_ecdev(adapter) && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && @@ -6526,12 +6525,12 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(first->skb); first->skb = NULL; } cleanup_tx_tstamp: - if (unlikely(!adapter->ecdev && tx_flags & IGB_TX_FLAGS_TSTAMP)) { + if (unlikely(!get_ecdev(adapter) && tx_flags & IGB_TX_FLAGS_TSTAMP)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; if (adapter->hw.mac.type == e1000_82576) @@ -8129,6 +8128,10 @@ static int igb_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (get_ecdev(q_vector->adapter)) { + return -EBUSY; + } + #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); @@ -8203,7 +8206,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) total_packets += tx_buffer->gso_segs; /* free the skb */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (tx_buffer->type == IGB_TYPE_SKB) napi_consume_skb(tx_buffer->skb, napi_budget); else @@ -8257,7 +8260,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) budget--; } while (likely(budget)); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); } @@ -8270,7 +8273,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; - if (!adapter->ecdev && + if (!get_ecdev(adapter) && test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; @@ -8314,7 +8317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(!adapter->ecdev && total_packets && + if (unlikely(!get_ecdev(adapter) && total_packets && netif_carrier_ok(tx_ring->netdev) && igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this @@ -8852,9 +8855,9 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); - ecdev_receive(adapter->ecdev, pktbuf, size); + ecdev_receive(get_ecdev(adapter), pktbuf, size); adapter->ec_watchdog_jiffies = jiffies; igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -8917,11 +8920,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (igb_is_non_eop(rx_ring, rx_desc)) continue; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { total_packets++; continue; } - + /* verify the packet layout is correct */ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; diff --git 
a/devices/igb/igb_main-6.1-ethercat.c b/devices/igb/igb_main-6.1-ethercat.c index 8c1c1165..c82c4977 100644 --- a/devices/igb/igb_main-6.1-ethercat.c +++ b/devices/igb/igb_main-6.1-ethercat.c @@ -933,7 +933,7 @@ static int igb_request_msix(struct igb_adapter *adapter) struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { return 0; } @@ -1034,8 +1034,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) - netif_napi_del(&q_vector->napi); + netif_napi_del(&q_vector->napi); } @@ -1220,9 +1219,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, return -ENOMEM; /* initialize NAPI */ - if (!adapter->ecdev) { - netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll); - } + netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -1440,7 +1437,7 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_assign_vector(adapter->q_vector[0], 0); - if (!adapter->ecdev && adapter->flags & IGB_FLAG_HAS_MSI) { + if (!get_ecdev(adapter) && adapter->flags & IGB_FLAG_HAS_MSI) { err = request_irq(pdev->irq, igb_intr_msi, 0, netdev->name, adapter); if (!err) @@ -1451,7 +1448,7 @@ static int igb_request_irq(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_HAS_MSI; } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, netdev->name, adapter); @@ -1466,7 +1463,7 @@ request_done: static void igb_free_irq(struct igb_adapter *adapter) { - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->flags & IGB_FLAG_HAS_MSIX) { int vector = 0, i; @@ -1506,7 +1503,7 @@ static void igb_irq_disable(struct igb_adapter *adapter) wr32(E1000_IMC, ~0); wrfl(); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* skip synchronizing IRQs */ return; } @@ -1528,7 +1525,7 @@ static void igb_irq_enable(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) + if (get_ecdev(adapter)) return; if (adapter->flags & IGB_FLAG_HAS_MSIX) { @@ -2146,7 +2143,7 @@ int igb_up(struct igb_adapter *adapter) clear_bit(__IGB_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); } @@ -2169,7 +2166,7 @@ int igb_up(struct igb_adapter *adapter) wr32(E1000_CTRL_EXT, reg_data); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. 
*/ @@ -2203,7 +2200,7 @@ void igb_down(struct igb_adapter *adapter) igb_nfc_filter_exit(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -2221,7 +2218,7 @@ void igb_down(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - if (!adapter->ecdev && adapter->q_vector[i]) { + if (!get_ecdev(adapter) && adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); napi_disable(&adapter->q_vector[i]->napi); } @@ -2469,7 +2466,7 @@ void igb_reset(struct igb_adapter *adapter) break; } } - if (!adapter->ecdev && !netif_running(adapter->netdev)) + if (!get_ecdev(adapter) && !netif_running(adapter->netdev)) igb_power_down_link(adapter); igb_update_mng_vlan(adapter); @@ -2933,7 +2930,7 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct igb_adapter *adapter = netdev_priv(dev); - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; @@ -3002,7 +2999,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n, int nxmit = 0; int i; - if (adapter->ecdev) + if (get_ecdev(adapter)) return -EBUSY; if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) @@ -3298,6 +3295,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; + adapter->ecdev_initialized = 0; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); @@ -3580,12 +3578,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igb_get_hw_control(adapter); - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = 1; + if (get_ecdev(adapter)) { init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } adapter->ec_watchdog_jiffies = jiffies; @@ -3893,10 +3892,10 @@ static void igb_remove(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); irq_work_sync(&adapter->ec_watchdog_kicker); - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); } pm_runtime_get_noresume(&pdev->dev); @@ -3933,7 +3932,7 @@ static void igb_remove(struct pci_dev *pdev) igb_disable_sriov(pdev); #endif - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { unregister_netdev(netdev); } @@ -4179,8 +4178,8 @@ static int __igb_open(struct net_device *netdev, bool resuming) if (!resuming) pm_runtime_get_sync(&pdev->dev); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); } @@ -4208,7 +4207,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* Notify the stack of the actual queue counts. 
*/ err = netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); @@ -4224,7 +4223,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); } @@ -4243,14 +4242,14 @@ static int __igb_open(struct net_device *netdev, bool resuming) wr32(E1000_CTRL_EXT, reg_data); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(netdev); } if (!resuming) pm_runtime_put(&pdev->dev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -4480,7 +4479,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) struct device *dev = rx_ring->dev; int size, res; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* XDP RX-queue info */ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); @@ -4512,7 +4511,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - if (!adapter->ecdev) + if (!get_ecdev(adapter)) rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -4819,7 +4818,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, int reg_idx = ring->reg_idx; u32 rxdctl = 0; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL)); @@ -4959,7 +4958,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) /* Free all the Tx ring sk_buffs or xdp frames */ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (tx_buffer->type == IGB_TYPE_SKB) dev_kfree_skb_any(tx_buffer->skb); else @@ -5572,13 +5571,13 @@ static void igb_watchdog_task(struct work_struct *work) u32 connsw; u16 phy_data, retry_count = 20; - if (adapter->ecdev) + if (get_ecdev(adapter)) hw->mac.get_link_status = true; link = igb_has_link(adapter); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, link); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), link); return; } @@ -6230,7 +6229,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) struct net_device *netdev = tx_ring->netdev; struct igb_adapter *adapter = netdev_priv(netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_stop_subqueue(netdev, tx_ring->queue_index); } @@ -6247,7 +6246,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) return -EBUSY; /* A reprieve! 
*/ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_wake_subqueue(netdev, tx_ring->queue_index); } @@ -6403,7 +6402,7 @@ dma_error: DMA_TO_DEVICE); dma_unmap_len_set(tx_buffer, len, 0); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; } @@ -6555,7 +6554,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - if (unlikely(!adapter->ecdev && + if (unlikely(!get_ecdev(adapter) && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && @@ -6594,12 +6593,12 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(first->skb); first->skb = NULL; } cleanup_tx_tstamp: - if (unlikely(!adapter->ecdev && tx_flags & IGB_TX_FLAGS_TSTAMP)) { + if (unlikely(!get_ecdev(adapter) && tx_flags & IGB_TX_FLAGS_TSTAMP)) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; if (adapter->hw.mac.type == e1000_82576) @@ -8282,6 +8281,10 @@ static int igb_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (get_ecdev(q_vector->adapter)) { + return -EBUSY; + } + #ifdef CONFIG_IGB_DCA if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) igb_update_dca(q_vector); @@ -8356,7 +8359,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) total_packets += tx_buffer->gso_segs; /* free the skb */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { if (tx_buffer->type == IGB_TYPE_SKB) napi_consume_skb(tx_buffer->skb, napi_budget); else @@ -8410,7 +8413,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) budget--; } while (likely(budget)); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); } @@ -8423,7 +8426,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; - if (!adapter->ecdev && + if (!get_ecdev(adapter) && test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; @@ -8467,7 +8470,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) } #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(!adapter->ecdev && total_packets && + if (unlikely(!get_ecdev(adapter) && total_packets && netif_carrier_ok(tx_ring->netdev) && igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this @@ -9005,9 +9008,9 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); - ecdev_receive(adapter->ecdev, pktbuf, size); + ecdev_receive(get_ecdev(adapter), pktbuf, size); adapter->ec_watchdog_jiffies = jiffies; igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -9071,7 +9074,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (igb_is_non_eop(rx_ring, rx_desc)) continue; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { total_packets++; continue; } diff --git a/devices/igc/Kbuild.in b/devices/igc/Kbuild.in index e896c9c1..201ff013 100644 --- a/devices/igc/Kbuild.in +++ 
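The igb hunks above replace every direct adapter->ecdev access with the get_ecdev(adapter) accessor and have igb_probe() record the handover point in adapter->ecdev_initialized (cleared at the top of probe, set right after ecdev_offer()). The igb accessor itself is not part of this excerpt; as a rough sketch, it presumably mirrors the get_ecdev() helper that this patch adds verbatim in the new igc headers further below, differing only in the adapter type:

    /* Sketch only, modelled on the get_ecdev() helper in the new igc
     * header added later in this patch; assumed to be what the igb side
     * defines for struct igb_adapter. The WARN_ON is compiled in only
     * when EC_ENABLE_DRIVER_RESOURCE_VERIFYING is defined (see the igc
     * Kbuild hunk below for how that define is injected). */
    static inline ec_device_t *get_ecdev(struct igb_adapter *adapter)
    {
    #ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING
            /* Flag any read of the ecdev handle that happens before
             * probe() has initialized it. */
            WARN_ON(!adapter->ecdev_initialized);
    #endif
            return adapter->ecdev_;
    }

At runtime the returned handle gates the non-EtherCAT paths shown above: while it is non-NULL the driver skips the netif_*, NAPI and XDP processing, returns -EBUSY from igb_poll() and the XDP hooks, and hands received frames straight to ecdev_receive() while reusing the RX page.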
b/devices/igc/Kbuild.in @@ -48,6 +48,10 @@ ifeq (@ENABLE_IGC@,1) igc_ethtool-@KERNEL_IGC@-ethercat.o \ igc_nvm-@KERNEL_IGC@-ethercat.o +ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y := -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING +endif + CFLAGS_igc_main-@KERNEL_IGC@-ethercat.o = -DREV=$(REV) endif diff --git a/devices/igc/Makefile.am b/devices/igc/Makefile.am index ea3c2ac3..09ad7eac 100644 --- a/devices/igc/Makefile.am +++ b/devices/igc/Makefile.am @@ -21,99 +21,196 @@ include $(top_srcdir)/Makefile.kbuild -EXTRiA_DIST = igc-6.6-ethercat.h \ - igc_base-6.6-orig.h \ - igc_diag-6.6-orig.c \ - igc_ethtool-6.6-orig.c \ - igc_i225-6.6-orig.c \ - igc_mac-6.6-orig.c \ - igc_nvm-6.6-ethercat.h \ - igc_phy-6.6-orig.c \ - igc_regs-6.6-orig.h \ - igc_xdp-6.6-ethercat.c \ - igc-6.6-orig.h \ - igc_defines-6.6-ethercat.h \ - igc_diag-6.6-orig.h \ - igc_hw-6.6-ethercat.h \ - igc_i225-6.6-orig.h \ - igc_mac-6.6-orig.h \ - igc_nvm-6.6-orig.c \ - igc_phy-6.6-orig.h \ - igc_tsn-6.6-ethercat.c \ - igc_xdp-6.6-ethercat.h \ - igc_base-6.6-ethercat.c \ - igc_defines-6.6-orig.h \ - igc_dump-6.6-ethercat.c \ - igc_hw-6.6-orig.h \ - igc_main-6.6-ethercat.c \ - igc_nvm-6.6-orig.h \ - igc_ptp-6.6-ethercat.c \ - igc_tsn-6.6-ethercat.h \ - igc_xdp-6.6-orig.c\ - igc_base-6.6-ethercat.h \ - igc_diag-6.6-ethercat.c \ - igc_dump-6.6-orig.c \ - igc_i225-6.6-ethercat.c \ - igc_mac-6.6-ethercat.c \ - igc_main-6.6-orig.c \ - igc_phy-6.6-ethercat.c \ - igc_ptp-6.6-orig.c \ - igc_tsn-6.6-orig.c \ - igc_xdp-6.6-orig.h\ - igc_base-6.6-orig.c \ - igc_diag-6.6-ethercat.h \ - igc_ethtool-6.6-ethercat.c \ - igc_i225-6.6-ethercat.h \ - igc_mac-6.6-ethercat.h \ - igc_nvm-6.6-ethercat.c \ - igc_phy-6.6-ethercat.h \ - igc_regs-6.6-ethercat.h \ - igc_tsn-6.6-orig.h \ +EXTRA_DIST = \ + igc-5.14-ethercat.h \ + igc-5.14-orig.h \ + igc-6.1-ethercat.h \ + igc-6.1-orig.h \ igc-6.4-ethercat.h \ - igc_base-6.4-orig.h \ - igc_diag-6.4-orig.c \ - igc_ethtool-6.4-orig.c \ - igc_i225-6.4-orig.c \ - igc_mac-6.4-orig.c \ - igc_nvm-6.4-ethercat.h \ - igc_phy-6.4-orig.c \ - igc_regs-6.4-orig.h \ - igc_xdp-6.4-ethercat.c \ igc-6.4-orig.h \ - igc_defines-6.4-ethercat.h \ - igc_diag-6.4-orig.h \ - igc_hw-6.4-ethercat.h \ - igc_i225-6.4-orig.h \ - igc_mac-6.4-orig.h \ - igc_nvm-6.4-orig.c \ - igc_phy-6.4-orig.h \ - igc_tsn-6.4-ethercat.c \ - igc_xdp-6.4-ethercat.h \ + igc-6.6-ethercat.h \ + igc-6.6-orig.h \ + igc_base-5.14-ethercat.c \ + igc_base-5.14-ethercat.h \ + igc_base-5.14-orig.c \ + igc_base-5.14-orig.h \ + igc_base-6.1-ethercat.c \ + igc_base-6.1-ethercat.h \ + igc_base-6.1-orig.c \ + igc_base-6.1-orig.h \ igc_base-6.4-ethercat.c \ - igc_defines-6.4-orig.h \ - igc_dump-6.4-ethercat.c \ - igc_hw-6.4-orig.h \ - igc_main-6.4-ethercat.c \ - igc_nvm-6.4-orig.h \ - igc_ptp-6.4-ethercat.c \ - igc_tsn-6.4-ethercat.h \ - igc_xdp-6.4-orig.c\ igc_base-6.4-ethercat.h \ - igc_diag-6.4-ethercat.c \ - igc_dump-6.4-orig.c \ - igc_i225-6.4-ethercat.c \ - igc_mac-6.4-ethercat.c \ - igc_main-6.4-orig.c \ - igc_phy-6.4-ethercat.c \ - igc_ptp-6.4-orig.c \ - igc_tsn-6.4-orig.c \ - igc_xdp-6.4-orig.h\ igc_base-6.4-orig.c \ + igc_base-6.4-orig.h \ + igc_base-6.6-ethercat.c \ + igc_base-6.6-ethercat.h \ + igc_base-6.6-orig.c \ + igc_base-6.6-orig.h \ + igc_defines-5.14-ethercat.h \ + igc_defines-5.14-orig.h \ + igc_defines-6.1-ethercat.h \ + igc_defines-6.1-orig.h \ + igc_defines-6.4-ethercat.h \ + igc_defines-6.4-orig.h \ + igc_defines-6.6-ethercat.h \ + igc_defines-6.6-orig.h \ + igc_diag-5.14-ethercat.c \ + igc_diag-5.14-ethercat.h \ + igc_diag-5.14-orig.c \ + 
igc_diag-5.14-orig.h \ + igc_diag-6.1-ethercat.c \ + igc_diag-6.1-ethercat.h \ + igc_diag-6.1-orig.c \ + igc_diag-6.1-orig.h \ + igc_diag-6.4-ethercat.c \ igc_diag-6.4-ethercat.h \ + igc_diag-6.4-orig.c \ + igc_diag-6.4-orig.h \ + igc_diag-6.6-ethercat.c \ + igc_diag-6.6-ethercat.h \ + igc_diag-6.6-orig.c \ + igc_diag-6.6-orig.h \ + igc_dump-5.14-ethercat.c \ + igc_dump-5.14-orig.c \ + igc_dump-6.1-ethercat.c \ + igc_dump-6.1-orig.c \ + igc_dump-6.4-ethercat.c \ + igc_dump-6.4-orig.c \ + igc_dump-6.6-ethercat.c \ + igc_dump-6.6-orig.c \ + igc_ethtool-5.14-ethercat.c \ + igc_ethtool-5.14-orig.c \ + igc_ethtool-6.1-ethercat.c \ + igc_ethtool-6.1-orig.c \ igc_ethtool-6.4-ethercat.c \ + igc_ethtool-6.4-orig.c \ + igc_ethtool-6.6-ethercat.c \ + igc_ethtool-6.6-orig.c \ + igc_hw-5.14-ethercat.h \ + igc_hw-5.14-orig.h \ + igc_hw-6.1-ethercat.h \ + igc_hw-6.1-orig.h \ + igc_hw-6.4-ethercat.h \ + igc_hw-6.4-orig.h \ + igc_hw-6.6-ethercat.h \ + igc_hw-6.6-orig.h \ + igc_i225-5.14-ethercat.c \ + igc_i225-5.14-ethercat.h \ + igc_i225-5.14-orig.c \ + igc_i225-5.14-orig.h \ + igc_i225-6.1-ethercat.c \ + igc_i225-6.1-ethercat.h \ + igc_i225-6.1-orig.c \ + igc_i225-6.1-orig.h \ + igc_i225-6.4-ethercat.c \ igc_i225-6.4-ethercat.h \ + igc_i225-6.4-orig.c \ + igc_i225-6.4-orig.h \ + igc_i225-6.6-ethercat.c \ + igc_i225-6.6-ethercat.h \ + igc_i225-6.6-orig.c \ + igc_i225-6.6-orig.h \ + igc_mac-5.14-ethercat.c \ + igc_mac-5.14-ethercat.h \ + igc_mac-5.14-orig.c \ + igc_mac-5.14-orig.h \ + igc_mac-6.1-ethercat.c \ + igc_mac-6.1-ethercat.h \ + igc_mac-6.1-orig.c \ + igc_mac-6.1-orig.h \ + igc_mac-6.4-ethercat.c \ igc_mac-6.4-ethercat.h \ + igc_mac-6.4-orig.c \ + igc_mac-6.4-orig.h \ + igc_mac-6.6-ethercat.c \ + igc_mac-6.6-ethercat.h \ + igc_mac-6.6-orig.c \ + igc_mac-6.6-orig.h \ + igc_main-5.14-ethercat.c \ + igc_main-5.14-orig.c \ + igc_main-6.1-ethercat.c \ + igc_main-6.1-orig.c \ + igc_main-6.4-ethercat.c \ + igc_main-6.4-orig.c \ + igc_main-6.6-ethercat.c \ + igc_main-6.6-orig.c \ + igc_nvm-5.14-ethercat.c \ + igc_nvm-5.14-ethercat.h \ + igc_nvm-5.14-orig.c \ + igc_nvm-5.14-orig.h \ + igc_nvm-6.1-ethercat.c \ + igc_nvm-6.1-ethercat.h \ + igc_nvm-6.1-orig.c \ + igc_nvm-6.1-orig.h \ igc_nvm-6.4-ethercat.c \ + igc_nvm-6.4-ethercat.h \ + igc_nvm-6.4-orig.c \ + igc_nvm-6.4-orig.h \ + igc_nvm-6.6-ethercat.c \ + igc_nvm-6.6-ethercat.h \ + igc_nvm-6.6-orig.c \ + igc_nvm-6.6-orig.h \ + igc_phy-5.14-ethercat.c \ + igc_phy-5.14-ethercat.h \ + igc_phy-5.14-orig.c \ + igc_phy-5.14-orig.h \ + igc_phy-6.1-ethercat.c \ + igc_phy-6.1-ethercat.h \ + igc_phy-6.1-orig.c \ + igc_phy-6.1-orig.h \ + igc_phy-6.4-ethercat.c \ igc_phy-6.4-ethercat.h \ + igc_phy-6.4-orig.c \ + igc_phy-6.4-orig.h \ + igc_phy-6.6-ethercat.c \ + igc_phy-6.6-ethercat.h \ + igc_phy-6.6-orig.c \ + igc_phy-6.6-orig.h \ + igc_ptp-5.14-ethercat.c \ + igc_ptp-5.14-orig.c \ + igc_ptp-6.1-ethercat.c \ + igc_ptp-6.1-orig.c \ + igc_ptp-6.4-ethercat.c \ + igc_ptp-6.4-orig.c \ + igc_ptp-6.6-ethercat.c \ + igc_ptp-6.6-orig.c \ + igc_regs-5.14-ethercat.h \ + igc_regs-5.14-orig.h \ + igc_regs-6.1-ethercat.h \ + igc_regs-6.1-orig.h \ igc_regs-6.4-ethercat.h \ - igc_tsn-6.4-orig.h + igc_regs-6.4-orig.h \ + igc_regs-6.6-ethercat.h \ + igc_regs-6.6-orig.h \ + igc_tsn-5.14-ethercat.c \ + igc_tsn-5.14-ethercat.h \ + igc_tsn-5.14-orig.c \ + igc_tsn-5.14-orig.h \ + igc_tsn-6.1-ethercat.c \ + igc_tsn-6.1-ethercat.h \ + igc_tsn-6.1-orig.c \ + igc_tsn-6.1-orig.h \ + igc_tsn-6.4-ethercat.c \ + igc_tsn-6.4-ethercat.h \ + igc_tsn-6.4-orig.c \ + igc_tsn-6.4-orig.h \ + 
igc_tsn-6.6-ethercat.c \ + igc_tsn-6.6-ethercat.h \ + igc_tsn-6.6-orig.c \ + igc_tsn-6.6-orig.h \ + igc_xdp-5.14-ethercat.c \ + igc_xdp-5.14-ethercat.h \ + igc_xdp-5.14-orig.c \ + igc_xdp-5.14-orig.h \ + igc_xdp-6.1-ethercat.c \ + igc_xdp-6.1-ethercat.h \ + igc_xdp-6.1-orig.c \ + igc_xdp-6.1-orig.h \ + igc_xdp-6.4-ethercat.c \ + igc_xdp-6.4-ethercat.h \ + igc_xdp-6.4-orig.c \ + igc_xdp-6.4-orig.h \ + igc_xdp-6.6-ethercat.c \ + igc_xdp-6.6-ethercat.h \ + igc_xdp-6.6-orig.c \ + igc_xdp-6.6-orig.h diff --git a/devices/igc/igc-5.14-ethercat.h b/devices/igc/igc-5.14-ethercat.h new file mode 100644 index 00000000..c41d6583 --- /dev/null +++ b/devices/igc/igc-5.14-ethercat.h @@ -0,0 +1,627 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ +#include "../ecdev.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc_hw-5.14-ethercat.h" + +void igc_ethtool_set_ops(struct net_device *); + +/* Transmit and receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +#define MAX_ETYPE_FILTER 8 +#define IGC_RETA_SIZE 128 + +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + + u32 start_time; + u32 end_time; + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + ktime_t base_time; + ktime_t cycle_time; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. 
+ */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; + + /* EtherCAT device variables */ + ec_device_t *ecdev_; + unsigned long ec_watchdog_jiffies; + struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; +}; + +static inline ec_device_t *get_ecdev(struct igc_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_MEDIA_RESET BIT(10) +#define IGC_FLAG_MAS_ENABLE BIT(12) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 
3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 80 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 80 +#define IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, + __IGC_PTP_TX_IN_PROGRESS, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = 0x1, + IGC_FILTER_FLAG_VLAN_TCI = 0x2, + IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based,, 8 VLAN priority + * based, and 8 ethertype based. + */ +#define IGC_MAX_RXNFC_RULES 32 + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc-5.14-orig.h b/devices/igc/igc-5.14-orig.h new file mode 100644 index 00000000..5901ed9f --- /dev/null +++ b/devices/igc/igc-5.14-orig.h @@ -0,0 +1,611 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc_hw.h" + +void igc_ethtool_set_ops(struct net_device *); + +/* Transmit and receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define 
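The descriptor-ring helpers in the header above are easier to follow with concrete numbers; the comment block below works through them (illustrative figures only, not part of the patch):

    /* Worked examples for the ring macros and igc_desc_unused() above
     * (plain arithmetic, not code from the patch):
     *
     *   IGC_MAX_DATA_PER_TXD = BIT(15) = 32768 bytes per data descriptor
     *   TXD_USE_COUNT(9216)  = DIV_ROUND_UP(9216, 32768)  = 1
     *   TXD_USE_COUNT(65535) = DIV_ROUND_UP(65535, 32768) = 2
     *
     * igc_desc_unused() with ring->count = 256:
     *   next_to_clean = 10,  next_to_use = 200  ->  256 + 10 - 200 - 1 = 65
     *   next_to_clean = 200, next_to_use = 10   ->    0 + 200 - 10 - 1 = 189
     *
     * The trailing "- 1" keeps one slot permanently unused, so a full ring
     * (next_to_use just behind next_to_clean) is never confused with an
     * empty one. */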
IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +#define MAX_ETYPE_FILTER 8 +#define IGC_RETA_SIZE 128 + +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + + u32 start_time; + u32 end_time; + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + ktime_t base_time; + ktime_t cycle_time; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. 
+ */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; +}; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_MEDIA_RESET BIT(10) +#define IGC_FLAG_MAS_ENABLE BIT(12) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 80 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 80 +#define 
IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, + __IGC_PTP_TX_IN_PROGRESS, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = 0x1, + IGC_FILTER_FLAG_VLAN_TCI = 0x2, + IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based,, 8 VLAN priority + * based, and 8 ethertype based. + */ +#define IGC_MAX_RXNFC_RULES 32 + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc-6.1-ethercat.h b/devices/igc/igc-6.1-ethercat.h new file mode 100644 index 00000000..13330ed5 --- /dev/null +++ b/devices/igc/igc-6.1-ethercat.h @@ -0,0 +1,665 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ +#include "../ecdev.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc_hw-6.1-ethercat.h" + +void igc_ethtool_set_ops(struct net_device *); + +/* Transmit and 
receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +#define MAX_ETYPE_FILTER 8 +#define IGC_RETA_SIZE 128 + +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + +#define MAX_FLEX_FILTER 32 + +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ + + u32 start_time; + u32 end_time; + + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + ktime_t base_time; + ktime_t cycle_time; + bool qbv_enable; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. 
+ */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; + + /* EtherCAT device variables */ + ec_device_t *ecdev_; + unsigned long ec_watchdog_jiffies; + struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; +}; + +static inline ec_device_t *get_ecdev(struct igc_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_MEDIA_RESET BIT(10) +#define IGC_FLAG_MAS_ENABLE BIT(12) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 
ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 80 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 80 +#define IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, + __IGC_PTP_TX_IN_PROGRESS, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + __be16 vlan_etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; + bool flex; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. + */ +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc-6.1-orig.h b/devices/igc/igc-6.1-orig.h new file mode 100644 index 00000000..df3e26c0 --- /dev/null +++ b/devices/igc/igc-6.1-orig.h @@ -0,0 +1,649 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc_hw.h" + +void igc_ethtool_set_ops(struct net_device *); + +/* Transmit and receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define 
IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +#define MAX_ETYPE_FILTER 8 +#define IGC_RETA_SIZE 128 + +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + +#define MAX_FLEX_FILTER 32 + +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ + + u32 start_time; + u32 end_time; + + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + ktime_t base_time; + ktime_t cycle_time; + bool qbv_enable; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. 
+ */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; +}; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_MEDIA_RESET BIT(10) +#define IGC_FLAG_MAS_ENABLE BIT(12) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define 
IGC_MIN_TXD 80 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 80 +#define IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, + __IGC_PTP_TX_IN_PROGRESS, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + __be16 vlan_etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; + bool flex; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. + */ +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc-6.4-ethercat.h b/devices/igc/igc-6.4-ethercat.h index 5f01efdf..107ea63b 100644 --- a/devices/igc/igc-6.4-ethercat.h +++ b/devices/igc/igc-6.4-ethercat.h @@ -17,6 +17,7 @@ #include #include #include +#include #include "igc_hw-6.4-ethercat.h" @@ -267,11 +268,20 @@ struct igc_adapter { } perout[IGC_N_PEROUT]; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t 
*get_ecdev(struct igc_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + void igc_up(struct igc_adapter *adapter); void igc_down(struct igc_adapter *adapter); int igc_open(struct net_device *netdev); diff --git a/devices/igc/igc-6.6-ethercat.h b/devices/igc/igc-6.6-ethercat.h index dc1b5813..8131a05d 100644 --- a/devices/igc/igc-6.6-ethercat.h +++ b/devices/igc/igc-6.6-ethercat.h @@ -282,11 +282,20 @@ struct igc_adapter { } perout[IGC_N_PEROUT]; /* EtherCAT device variables */ - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct igc_adapter *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + void igc_up(struct igc_adapter *adapter); void igc_down(struct igc_adapter *adapter); int igc_open(struct net_device *netdev); diff --git a/devices/igc/igc_base-5.14-ethercat.c b/devices/igc/igc_base-5.14-ethercat.c new file mode 100644 index 00000000..b9b1890b --- /dev/null +++ b/devices/igc/igc_base-5.14-ethercat.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw-5.14-ethercat.h" +#include "igc_i225-5.14-ethercat.h" +#include "igc_mac-5.14-ethercat.h" +#include "igc_base-5.14-ethercat.h" +#include "igc-5.14-ethercat.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? 
+ 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + if (hw->phy.media_type != igc_media_type_copper) { + phy->type = igc_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. 
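+ * (This is why hw->phy.ops.reset() is called immediately below, before + * igc_get_phy_id() reads the PHY identifier.)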
+ */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I225_I_PHY_ID: + phy->type = igc_phy_i225; + break; + default: + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. 
+ */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-5.14-ethercat.h b/devices/igc/igc_base-5.14-ethercat.h new file mode 100644 index 00000000..ce530f5f --- /dev/null +++ b/devices/igc/igc_base-5.14-ethercat.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 
csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_base-5.14-orig.c b/devices/igc/igc_base-5.14-orig.c new file mode 100644 index 00000000..d0700d48 --- /dev/null +++ b/devices/igc/igc_base-5.14-orig.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw.h" +#include "igc_i225.h" +#include "igc_mac.h" +#include "igc_base.h" +#include "igc.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. 
Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + if (hw->phy.media_type != igc_media_type_copper) { + phy->type = igc_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. 
+ */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I225_I_PHY_ID: + phy->type = igc_phy_i225; + break; + default: + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. 
+ */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-5.14-orig.h b/devices/igc/igc_base-5.14-orig.h new file mode 100644 index 00000000..ce530f5f --- /dev/null +++ b/devices/igc/igc_base-5.14-orig.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* 
Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_base-6.1-ethercat.c b/devices/igc/igc_base-6.1-ethercat.c new file mode 100644 index 00000000..d2bcbafe --- /dev/null +++ b/devices/igc/igc_base-6.1-ethercat.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw-6.1-ethercat.h" +#include "igc_i225-6.1-ethercat.h" +#include "igc_mac-6.1-ethercat.h" +#include "igc_base-6.1-ethercat.h" +#include "igc-6.1-ethercat.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? 
+ 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. 
+ */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I226_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. 
+ */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-6.1-ethercat.h b/devices/igc/igc_base-6.1-ethercat.h new file mode 100644 index 00000000..ce530f5f --- /dev/null +++ b/devices/igc/igc_base-6.1-ethercat.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's 
data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_base-6.1-orig.c b/devices/igc/igc_base-6.1-orig.c new file mode 100644 index 00000000..a15927e7 --- /dev/null +++ b/devices/igc/igc_base-6.1-orig.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw.h" +#include "igc_i225.h" +#include "igc_mac.h" +#include "igc_base.h" +#include "igc.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
+ */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. 
Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I226_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. 
+ */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-6.1-orig.h b/devices/igc/igc_base-6.1-orig.h new file mode 100644 index 00000000..ce530f5f --- /dev/null +++ b/devices/igc/igc_base-6.1-orig.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet 
Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_defines-5.14-ethercat.h b/devices/igc/igc_defines-5.14-ethercat.h new file mode 100644 index 00000000..c3a5a551 --- /dev/null +++ b/devices/igc/igc_defines-5.14-ethercat.h @@ -0,0 +1,596 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. 
However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 
/* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. 
*/ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* Bit definitions for valid PHY IDs. 
I = Integrated E = External */ +#define I225_I_PHY_ID 0x67C9DC00 + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_INT_EN 0x20000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +#define IGC_DMACR_DMACTHR_MASK 0x00FF0000 +#define IGC_DMACR_DMACTHR_SHIFT 16 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_defines-5.14-orig.h b/devices/igc/igc_defines-5.14-orig.h new file mode 100644 index 00000000..c3a5a551 --- /dev/null +++ b/devices/igc/igc_defines-5.14-orig.h @@ -0,0 +1,596 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit 
for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 
/* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. 
*/ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* Bit definitions for valid PHY IDs. 
I = Integrated E = External */ +#define I225_I_PHY_ID 0x67C9DC00 + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_INT_EN 0x20000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +#define IGC_DMACR_DMACTHR_MASK 0x00FF0000 +#define IGC_DMACR_DMACTHR_SHIFT 16 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_defines-6.1-ethercat.h b/devices/igc/igc_defines-6.1-ethercat.h new file mode 100644 index 00000000..dbfa4b9d --- /dev/null +++ b/devices/igc/igc_defines-6.1-ethercat.h @@ -0,0 +1,673 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv 
loaded bit for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define 
IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* 
MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 
0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. 
*/ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define 
PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +#define IGC_DMACR_DMACTHR_MASK 0x00FF0000 +#define IGC_DMACR_DMACTHR_SHIFT 16 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_defines-6.1-orig.h b/devices/igc/igc_defines-6.1-orig.h new file mode 100644 index 00000000..dbfa4b9d --- /dev/null +++ b/devices/igc/igc_defines-6.1-orig.h @@ -0,0 +1,673 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded 
bit for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define 
IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* 
MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 
0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. 
*/ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define 
PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +#define IGC_DMACR_DMACTHR_MASK 0x00FF0000 +#define IGC_DMACR_DMACTHR_SHIFT 16 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_diag-5.14-ethercat.c b/devices/igc/igc_diag-5.14-ethercat.c new file mode 100644 index 00000000..0657fe1b --- /dev/null +++ b/devices/igc/igc_diag-5.14-ethercat.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc-5.14-ethercat.h" +#include "igc_diag-5.14-ethercat.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 
0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. + */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-5.14-ethercat.h b/devices/igc/igc_diag-5.14-ethercat.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-5.14-ethercat.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_diag-5.14-orig.c b/devices/igc/igc_diag-5.14-orig.c new file mode 100644 index 00000000..cc621970 --- /dev/null +++ b/devices/igc/igc_diag-5.14-orig.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc.h" +#include "igc_diag.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. 
+ */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-5.14-orig.h b/devices/igc/igc_diag-5.14-orig.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-5.14-orig.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_diag-6.1-ethercat.c b/devices/igc/igc_diag-6.1-ethercat.c new file mode 100644 index 00000000..72bd2326 --- /dev/null +++ b/devices/igc/igc_diag-6.1-ethercat.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc-6.1-ethercat.h" +#include "igc_diag-6.1-ethercat.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. 
+ */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-6.1-ethercat.h b/devices/igc/igc_diag-6.1-ethercat.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-6.1-ethercat.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_diag-6.1-orig.c b/devices/igc/igc_diag-6.1-orig.c new file mode 100644 index 00000000..cc621970 --- /dev/null +++ b/devices/igc/igc_diag-6.1-orig.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc.h" +#include "igc_diag.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. 
+ */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-6.1-orig.h b/devices/igc/igc_diag-6.1-orig.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-6.1-orig.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_dump-5.14-ethercat.c b/devices/igc/igc_dump-5.14-ethercat.c new file mode 100644 index 00000000..97330f1a --- /dev/null +++ b/devices/igc/igc_dump-5.14-ethercat.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc-5.14-ethercat.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = 
adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < 
adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + /* Print Registers */ + netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/devices/igc/igc_dump-5.14-orig.c b/devices/igc/igc_dump-5.14-orig.c new file mode 100644 index 00000000..c09c95cc --- /dev/null +++ b/devices/igc/igc_dump-5.14-orig.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + 
for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + 
next_desc = " NTC"; + else + next_desc = ""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + 
/* Print Registers */ + netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/devices/igc/igc_dump-6.1-ethercat.c b/devices/igc/igc_dump-6.1-ethercat.c new file mode 100644 index 00000000..d621069d --- /dev/null +++ b/devices/igc/igc_dump-6.1-ethercat.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc-6.1-ethercat.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + 
for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * 
+------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + /* Print Registers */ + netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/devices/igc/igc_dump-6.1-orig.c b/devices/igc/igc_dump-6.1-orig.c new file mode 100644 index 00000000..c09c95cc --- /dev/null +++ b/devices/igc/igc_dump-6.1-orig.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + 
case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + 
else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void 
igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + /* Print Registers */ + netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/devices/igc/igc_ethtool-5.14-ethercat.c b/devices/igc/igc_ethtool-5.14-ethercat.c new file mode 100644 index 00000000..bd5ecaa8 --- /dev/null +++ b/devices/igc/igc_ethtool-5.14-ethercat.c @@ -0,0 +1,1986 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include +#include +#include + +#include "igc-5.14-ethercat.h" +#include "igc_diag-5.14-ethercat.h" + +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static 
const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + 
regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = 
rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void igc_ethtool_get_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int igc_ethtool_set_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring 
structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && 
q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); + case ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + 
return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both source and destination address filters only support the full + * mask. 
+ */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + if (flags & (flags - 1)) { + netdev_dbg(dev, "Rule with multiple matches not supported\n"); + return -EOPNOTSUPP; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + netdev_dbg(netdev, "VLAN mask not supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + 
mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size, + .get_rxfh = igc_ethtool_get_rxfh, + .set_rxfh = igc_ethtool_set_rxfh, + .get_ts_info = igc_ethtool_get_ts_info, + .get_channels = igc_ethtool_get_channels, + .set_channels = igc_ethtool_set_channels, + .get_priv_flags = igc_ethtool_get_priv_flags, + .set_priv_flags = igc_ethtool_set_priv_flags, + .get_eee = igc_ethtool_get_eee, + .set_eee = igc_ethtool_set_eee, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_ethtool_get_link_ksettings, + .set_link_ksettings = igc_ethtool_set_link_ksettings, + .self_test = igc_ethtool_diag_test, +}; + +void igc_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/devices/igc/igc_ethtool-5.14-orig.c b/devices/igc/igc_ethtool-5.14-orig.c new file mode 100644 index 00000000..fa417186 --- /dev/null +++ b/devices/igc/igc_ethtool-5.14-orig.c @@ -0,0 +1,1951 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include <linux/if_vlan.h> +#include <linux/pm_runtime.h> +#include <linux/mdio.h> + +#include "igc.h" +#include "igc_diag.h" + +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ +
.sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[6] = 
rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = 
rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct 
igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); + case ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to 
queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both source and destination address filters only support the full + * mask. 
+ */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + if (flags & (flags - 1)) { + netdev_dbg(dev, "Rule with multiple matches not supported\n"); + return -EOPNOTSUPP; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + netdev_dbg(netdev, "VLAN mask not supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + 
mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size, + .get_rxfh = igc_ethtool_get_rxfh, + .set_rxfh = igc_ethtool_set_rxfh, + .get_ts_info = igc_ethtool_get_ts_info, + .get_channels = igc_ethtool_get_channels, + .set_channels = igc_ethtool_set_channels, + .get_priv_flags = igc_ethtool_get_priv_flags, + .set_priv_flags = igc_ethtool_set_priv_flags, + .get_eee = igc_ethtool_get_eee, + .set_eee = igc_ethtool_set_eee, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_ethtool_get_link_ksettings, + .set_link_ksettings = igc_ethtool_set_link_ksettings, + .self_test = igc_ethtool_diag_test, +}; + +void igc_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/devices/igc/igc_ethtool-6.1-ethercat.c b/devices/igc/igc_ethtool-6.1-ethercat.c new file mode 100644 index 00000000..e388be10 --- /dev/null +++ b/devices/igc/igc_ethtool-6.1-ethercat.c @@ -0,0 +1,1980 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include <linux/if_vlan.h> +#include <linux/pm_runtime.h> +#include <linux/mdio.h> + +#include "igc-6.1-ethercat.h" +#include "igc_diag-6.1-ethercat.h" + +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = 
__stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ 
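+ /* Slot 6 stands in for EICR and slot 7 for EICS; the same pattern + * repeats for ICR/ICS in slots 12 and 13 below. + */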
+ regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + 
regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
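+ /* The NVM is word (16-bit) addressable, so byte offsets are halved: + * e.g. offset=3 and len=4 touch words 1..3, and because both ends fall + * on odd byte boundaries the first and last words are read back and + * only partially rewritten below. + */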
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int 
igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); + case 
ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both source and destination address filters only support the full + * mask. 
+ */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = 
adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size, + .get_rxfh = igc_ethtool_get_rxfh, + .set_rxfh = igc_ethtool_set_rxfh, + .get_ts_info = igc_ethtool_get_ts_info, + .get_channels = igc_ethtool_get_channels, + .set_channels = igc_ethtool_set_channels, + .get_priv_flags = igc_ethtool_get_priv_flags, + .set_priv_flags = igc_ethtool_set_priv_flags, + .get_eee = igc_ethtool_get_eee, + .set_eee = igc_ethtool_set_eee, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_ethtool_get_link_ksettings, + .set_link_ksettings = igc_ethtool_set_link_ksettings, + .self_test = igc_ethtool_diag_test, +}; + +void igc_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/devices/igc/igc_ethtool-6.1-orig.c b/devices/igc/igc_ethtool-6.1-orig.c new file mode 100644 index 00000000..8cc077b7 --- /dev/null +++ b/devices/igc/igc_ethtool-6.1-orig.c @@ -0,0 +1,1980 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include +#include +#include + +#include "igc.h" +#include "igc_diag.h" + +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + 
.sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[6] = 
rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = 
rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force 
__be16)~0) +static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, 
rule_locs); + case ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both source and destination address filters only support the full + * mask. 
+ */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = 
adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size, + .get_rxfh = igc_ethtool_get_rxfh, + .set_rxfh = igc_ethtool_set_rxfh, + .get_ts_info = igc_ethtool_get_ts_info, + .get_channels = igc_ethtool_get_channels, + .set_channels = igc_ethtool_set_channels, + .get_priv_flags = igc_ethtool_get_priv_flags, + .set_priv_flags = igc_ethtool_set_priv_flags, + .get_eee = igc_ethtool_get_eee, + .set_eee = igc_ethtool_set_eee, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_ethtool_get_link_ksettings, + .set_link_ksettings = igc_ethtool_set_link_ksettings, + .self_test = igc_ethtool_diag_test, +}; + +void igc_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/devices/igc/igc_ethtool-6.4-ethercat.c b/devices/igc/igc_ethtool-6.4-ethercat.c index 3e6c7af6..964d5475 100644 --- a/devices/igc/igc_ethtool-6.4-ethercat.c +++ b/devices/igc/igc_ethtool-6.4-ethercat.c @@ -9,6 +9,18 @@ #include "igc-6.4-ethercat.h" #include "igc_diag-6.4-ethercat.h" +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + + /* forward declaration */ struct igc_stats { char stat_string[ETH_GSTRING_LEN]; @@ -1420,11 +1432,20 @@ static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) return IGC_RETA_SIZE; } +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 6 +static int igc_ethtool_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) +#else static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#endif { struct igc_adapter *adapter = netdev_priv(netdev); int i; +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 6 + u32 *indir = rxfh->indir; + u8 *hfunc = &rxfh->hfunc; +#endif if (hfunc) *hfunc = ETH_RSS_HASH_TOP; @@ -1436,12 +1457,23 @@ static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, return 0; } +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 6 +static int igc_ethtool_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +#else static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +#endif { struct igc_adapter *adapter = netdev_priv(netdev); u32 num_queues; int i; +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 6 + const u8 *key = rxfh->key; + const u32 *indir = rxfh->indir; + const u8 hfunc = rxfh->hfunc; +#endif /* We do not allow change in unsupported parameters */ if (key || diff --git a/devices/igc/igc_hw-5.14-ethercat.h b/devices/igc/igc_hw-5.14-ethercat.h new file mode 100644 index 00000000..cd907ce5 --- /dev/null +++ b/devices/igc/igc_hw-5.14-ethercat.h @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_HW_H_ +#define _IGC_HW_H_ + +#include +#include +#include + +#include "igc_regs-5.14-ethercat.h" +#include "igc_defines-5.14-ethercat.h" +#include "igc_mac-5.14-ethercat.h" +#include "igc_phy-5.14-ethercat.h" +#include "igc_nvm-5.14-ethercat.h" +#include "igc_i225-5.14-ethercat.h" +#include "igc_base-5.14-ethercat.h" + +#define IGC_DEV_ID_I225_LM 0x15F2 +#define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_K 0x3100 +#define IGC_DEV_ID_I225_K2 0x3101 +#define IGC_DEV_ID_I226_K 0x3102 +#define IGC_DEV_ID_I225_LMVP 0x5502 +#define IGC_DEV_ID_I225_IT 0x0D9F +#define IGC_DEV_ID_I226_LM 0x125B +#define 
IGC_DEV_ID_I226_V 0x125C +#define IGC_DEV_ID_I226_IT 0x125D +#define IGC_DEV_ID_I221_V 0x125E +#define IGC_DEV_ID_I226_BLANK_NVM 0x125F +#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD + +/* Function pointers for the MAC. */ +struct igc_mac_operations { + s32 (*check_for_link)(struct igc_hw *hw); + s32 (*reset_hw)(struct igc_hw *hw); + s32 (*init_hw)(struct igc_hw *hw); + s32 (*setup_physical_interface)(struct igc_hw *hw); + void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index); + s32 (*read_mac_addr)(struct igc_hw *hw); + s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed, + u16 *duplex); + s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask); + void (*release_swfw_sync)(struct igc_hw *hw, u16 mask); +}; + +enum igc_mac_type { + igc_undefined = 0, + igc_i225, + igc_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +enum igc_phy_type { + igc_phy_unknown = 0, + igc_phy_none, + igc_phy_i225, +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, + igc_nvm_flash_hw, + igc_nvm_invm, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + enum igc_phy_type type; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum 
igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 htdpmc; + u64 rpthc; + u64 hgptc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct net_device *igc_get_hw_dev(struct igc_hw *hw); +#define hw_dbg(format, arg...) \ + netdev_dbg(igc_get_hw_dev(hw), format, ##arg) + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); + +#endif /* _IGC_HW_H_ */ diff --git a/devices/igc/igc_hw-5.14-orig.h b/devices/igc/igc_hw-5.14-orig.h new file mode 100644 index 00000000..4e020333 --- /dev/null +++ b/devices/igc/igc_hw-5.14-orig.h @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_HW_H_ +#define _IGC_HW_H_ + +#include +#include +#include + +#include "igc_regs.h" +#include "igc_defines.h" +#include "igc_mac.h" +#include "igc_phy.h" +#include "igc_nvm.h" +#include "igc_i225.h" +#include "igc_base.h" + +#define IGC_DEV_ID_I225_LM 0x15F2 +#define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_K 0x3100 +#define IGC_DEV_ID_I225_K2 0x3101 +#define IGC_DEV_ID_I226_K 0x3102 +#define IGC_DEV_ID_I225_LMVP 0x5502 +#define IGC_DEV_ID_I225_IT 0x0D9F +#define IGC_DEV_ID_I226_LM 0x125B +#define IGC_DEV_ID_I226_V 0x125C +#define IGC_DEV_ID_I226_IT 0x125D +#define IGC_DEV_ID_I221_V 0x125E +#define IGC_DEV_ID_I226_BLANK_NVM 0x125F +#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD + +/* Function pointers for the MAC. 
*/ +struct igc_mac_operations { + s32 (*check_for_link)(struct igc_hw *hw); + s32 (*reset_hw)(struct igc_hw *hw); + s32 (*init_hw)(struct igc_hw *hw); + s32 (*setup_physical_interface)(struct igc_hw *hw); + void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index); + s32 (*read_mac_addr)(struct igc_hw *hw); + s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed, + u16 *duplex); + s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask); + void (*release_swfw_sync)(struct igc_hw *hw, u16 mask); +}; + +enum igc_mac_type { + igc_undefined = 0, + igc_i225, + igc_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +enum igc_phy_type { + igc_phy_unknown = 0, + igc_phy_none, + igc_phy_i225, +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, + igc_nvm_flash_hw, + igc_nvm_invm, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + enum igc_phy_type type; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct 
igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 htdpmc; + u64 rpthc; + u64 hgptc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct net_device *igc_get_hw_dev(struct igc_hw *hw); +#define hw_dbg(format, arg...) \ + netdev_dbg(igc_get_hw_dev(hw), format, ##arg) + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); + +#endif /* _IGC_HW_H_ */ diff --git a/devices/igc/igc_hw-6.1-ethercat.h b/devices/igc/igc_hw-6.1-ethercat.h new file mode 100644 index 00000000..eceaaf0d --- /dev/null +++ b/devices/igc/igc_hw-6.1-ethercat.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_HW_H_ +#define _IGC_HW_H_ + +#include +#include +#include + +#include "igc_regs-6.1-ethercat.h" +#include "igc_defines-6.1-ethercat.h" +#include "igc_mac-6.1-ethercat.h" +#include "igc_phy-6.1-ethercat.h" +#include "igc_nvm-6.1-ethercat.h" +#include "igc_i225-6.1-ethercat.h" +#include "igc_base-6.1-ethercat.h" + +#define IGC_DEV_ID_I225_LM 0x15F2 +#define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_K 0x3100 +#define IGC_DEV_ID_I225_K2 0x3101 +#define IGC_DEV_ID_I226_K 0x3102 +#define IGC_DEV_ID_I225_LMVP 0x5502 +#define IGC_DEV_ID_I226_LMVP 0x5503 +#define IGC_DEV_ID_I225_IT 0x0D9F +#define IGC_DEV_ID_I226_LM 0x125B +#define IGC_DEV_ID_I226_V 0x125C +#define IGC_DEV_ID_I226_IT 0x125D +#define IGC_DEV_ID_I221_V 0x125E +#define IGC_DEV_ID_I226_BLANK_NVM 0x125F +#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD + +/* Function pointers for the MAC. 
*/ +struct igc_mac_operations { + s32 (*check_for_link)(struct igc_hw *hw); + s32 (*reset_hw)(struct igc_hw *hw); + s32 (*init_hw)(struct igc_hw *hw); + s32 (*setup_physical_interface)(struct igc_hw *hw); + void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index); + s32 (*read_mac_addr)(struct igc_hw *hw); + s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed, + u16 *duplex); + s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask); + void (*release_swfw_sync)(struct igc_hw *hw, u16 mask); +}; + +enum igc_mac_type { + igc_undefined = 0, + igc_i225, + igc_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } 
dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 htdpmc; + u64 rpthc; + u64 hgptc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct net_device *igc_get_hw_dev(struct igc_hw *hw); +#define hw_dbg(format, arg...) \ + netdev_dbg(igc_get_hw_dev(hw), format, ##arg) + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); + +#endif /* _IGC_HW_H_ */ diff --git a/devices/igc/igc_hw-6.1-orig.h b/devices/igc/igc_hw-6.1-orig.h new file mode 100644 index 00000000..88680e3d --- /dev/null +++ b/devices/igc/igc_hw-6.1-orig.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_HW_H_ +#define _IGC_HW_H_ + +#include +#include +#include + +#include "igc_regs.h" +#include "igc_defines.h" +#include "igc_mac.h" +#include "igc_phy.h" +#include "igc_nvm.h" +#include "igc_i225.h" +#include "igc_base.h" + +#define IGC_DEV_ID_I225_LM 0x15F2 +#define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_K 0x3100 +#define IGC_DEV_ID_I225_K2 0x3101 +#define IGC_DEV_ID_I226_K 0x3102 +#define IGC_DEV_ID_I225_LMVP 0x5502 +#define IGC_DEV_ID_I226_LMVP 0x5503 +#define IGC_DEV_ID_I225_IT 0x0D9F +#define IGC_DEV_ID_I226_LM 0x125B +#define IGC_DEV_ID_I226_V 0x125C +#define IGC_DEV_ID_I226_IT 0x125D +#define IGC_DEV_ID_I221_V 0x125E +#define IGC_DEV_ID_I226_BLANK_NVM 0x125F +#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD + +/* Function pointers for the MAC. */ +struct igc_mac_operations { + s32 (*check_for_link)(struct igc_hw *hw); + s32 (*reset_hw)(struct igc_hw *hw); + s32 (*init_hw)(struct igc_hw *hw); + s32 (*setup_physical_interface)(struct igc_hw *hw); + void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index); + s32 (*read_mac_addr)(struct igc_hw *hw); + s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed, + u16 *duplex); + s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask); + void (*release_swfw_sync)(struct igc_hw *hw, u16 mask); +}; + +enum igc_mac_type { + igc_undefined = 0, + igc_i225, + igc_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; 
+ u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 htdpmc; + u64 rpthc; + u64 hgptc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct net_device *igc_get_hw_dev(struct igc_hw *hw); +#define hw_dbg(format, arg...) \ + netdev_dbg(igc_get_hw_dev(hw), format, ##arg) + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); + +#endif /* _IGC_HW_H_ */ diff --git a/devices/igc/igc_i225-5.14-ethercat.c b/devices/igc/igc_i225-5.14-ethercat.c new file mode 100644 index 00000000..5829686c --- /dev/null +++ b/devices/igc/igc_i225-5.14-ethercat.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw-5.14-ethercat.h" + +/** + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igc_get_hw_semaphore_i225(hw)) + ; /* Empty */ + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. 
+ */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. 
*/ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + hw->nvm.type = igc_nvm_flash_hw; + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + hw->nvm.type = igc_nvm_invm; + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. */ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Calculations vary based on DMAC settings. 
*/ + if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) { + size -= (rd32(IGC_DMACR) & + IGC_DMACR_DMACTHR_MASK) >> + IGC_DMACR_DMACTHR_SHIFT; + /* Convert size to bits. */ + size *= 1024 * 8; + } else { + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + } + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. */ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_min << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/devices/igc/igc_i225-5.14-ethercat.h b/devices/igc/igc_i225-5.14-ethercat.h new file mode 100644 index 00000000..dae47e4f --- /dev/null +++ b/devices/igc/igc_i225-5.14-ethercat.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); + +#endif diff --git a/devices/igc/igc_i225-5.14-orig.c b/devices/igc/igc_i225-5.14-orig.c new file mode 100644 index 00000000..b2ef9fde --- /dev/null +++ b/devices/igc/igc_i225-5.14-orig.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw.h" + +/** + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. 
+ */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igc_get_hw_semaphore_i225(hw)) + ; /* Empty */ + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. 
+ */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? 
+ IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
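/* Editorial sketch (not part of the patch): the checksum rule applied by the
 * loop that follows - all NVM words up to and including the checksum slot
 * must sum to NVM_SUM (0xBABA), so the checksum word is NVM_SUM minus the
 * partial sum. The word values are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned short words[4] = { 0x1234, 0xabcd, 0x0042, 0x0000 };
	unsigned short sum = 0;
	int i;

	for (i = 0; i < 3; i++)                 /* words before the checksum slot */
		sum += words[i];

	words[3] = (unsigned short)(0xBABA - sum);  /* NVM_SUM - partial sum */
	sum += words[3];

	printf("checksum word 0x%04x, total 0x%04x\n",
	       (unsigned int)words[3], (unsigned int)sum);
	return 0;
}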
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + hw->nvm.type = igc_nvm_flash_hw; + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + hw->nvm.type = igc_nvm_invm; + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. 
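/* Editorial sketch (not part of the patch): the threshold arithmetic that
 * igc_set_ltr_i225() below performs, with an invented Rx buffer size, link
 * speed and EEE exit latency. The 1024/32768 ns granularities stand in for
 * the IGC_LTRMINV/IGC_LTRMAXV scale encodings.
 */
#include <stdio.h>

int main(void)
{
	unsigned int size = 32 * 1024 * 8;  /* effective Rx buffer, in bits */
	unsigned int speed = 1000;          /* link speed in Mbps */
	unsigned int tw_system = 17000;     /* EEE exit latency in ns (example) */
	unsigned int ltr_min, ltr_max, scale_min, scale_max;

	ltr_min = (1000 * size) / speed;    /* buffer drain time in ns */
	ltr_max = ltr_min + tw_system;

	/* Use the coarser 32768 ns scale only when the value would not fit
	 * the register field at 1024 ns granularity. */
	scale_min = (ltr_min / 1024) < 1024 ? 1024 : 32768;
	scale_max = (ltr_max / 1024) < 1024 ? 1024 : 32768;

	printf("LTRMINV value %u at scale %u ns\n",
	       ltr_min / scale_min - 1, scale_min);
	printf("LTRMAXV value %u at scale %u ns\n",
	       ltr_max / scale_max - 1, scale_max);
	return 0;
}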
+ */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. */ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Calculations vary based on DMAC settings. */ + if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) { + size -= (rd32(IGC_DMACR) & + IGC_DMACR_DMACTHR_MASK) >> + IGC_DMACR_DMACTHR_SHIFT; + /* Convert size to bits. */ + size *= 1024 * 8; + } else { + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + } + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. 
*/ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_min << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/devices/igc/igc_i225-5.14-orig.h b/devices/igc/igc_i225-5.14-orig.h new file mode 100644 index 00000000..dae47e4f --- /dev/null +++ b/devices/igc/igc_i225-5.14-orig.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); + +#endif diff --git a/devices/igc/igc_i225-6.1-ethercat.c b/devices/igc/igc_i225-6.1-ethercat.c new file mode 100644 index 00000000..c70d8e7d --- /dev/null +++ b/devices/igc/igc_i225-6.1-ethercat.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw-6.1-ethercat.h" + +/** + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. + */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? 
+ IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. 
+ * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. */ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Calculations vary based on DMAC settings. */ + if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) { + size -= (rd32(IGC_DMACR) & + IGC_DMACR_DMACTHR_MASK) >> + IGC_DMACR_DMACTHR_SHIFT; + /* Convert size to bits. 
*/ + size *= 1024 * 8; + } else { + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + } + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. */ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_max << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/devices/igc/igc_i225-6.1-ethercat.h b/devices/igc/igc_i225-6.1-ethercat.h new file mode 100644 index 00000000..dae47e4f --- /dev/null +++ b/devices/igc/igc_i225-6.1-ethercat.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); + +#endif diff --git a/devices/igc/igc_i225-6.1-orig.c b/devices/igc/igc_i225-6.1-orig.c new file mode 100644 index 00000000..59d5c467 --- /dev/null +++ b/devices/igc/igc_i225-6.1-orig.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw.h" + +/** + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. 
+ */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. 
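/* Editorial sketch (not part of the patch): the SW_FW_SYNC mask layout used
 * by the acquire/release helpers in this file - the low 16 bits carry the
 * software ownership flags and the upper 16 bits mirror them for firmware.
 * The IGC_SWFW_EEP_SM value is assumed for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned short mask = 0x1;           /* e.g. IGC_SWFW_EEP_SM (assumed) */
	unsigned int swmask = mask;          /* software's claim */
	unsigned int fwmask = (unsigned int)mask << 16; /* firmware's claim */
	unsigned int swfw_sync = 0x00010000; /* example: firmware holds it */

	printf("resource free: %s\n",
	       (swfw_sync & (swmask | fwmask)) ? "no" : "yes");

	swfw_sync &= ~swmask;                /* release: clear only our bit */
	printf("after release: 0x%08x\n", swfw_sync);
	return 0;
}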
+ */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. 
However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. 
*/ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Calculations vary based on DMAC settings. */ + if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) { + size -= (rd32(IGC_DMACR) & + IGC_DMACR_DMACTHR_MASK) >> + IGC_DMACR_DMACTHR_SHIFT; + /* Convert size to bits. */ + size *= 1024 * 8; + } else { + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + } + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. 
*/ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_max << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/devices/igc/igc_i225-6.1-orig.h b/devices/igc/igc_i225-6.1-orig.h new file mode 100644 index 00000000..dae47e4f --- /dev/null +++ b/devices/igc/igc_i225-6.1-orig.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); + +#endif diff --git a/devices/igc/igc_mac-5.14-ethercat.c b/devices/igc/igc_mac-5.14-ethercat.c new file mode 100644 index 00000000..d04d3a0a --- /dev/null +++ b/devices/igc/igc_mac-5.14-ethercat.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include + +#include "igc_mac-5.14-ethercat.h" +#include "igc_hw-5.14-ethercat.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. 
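/* Editorial sketch (not part of the patch): the watermark programming done by
 * igc_set_fc_watermarks() below - the low/high thresholds are only written
 * when Tx pause is allowed, and the XON-enable flag is folded into the low
 * watermark. Register values and the XONE bit position are assumed here.
 */
#include <stdio.h>

int main(void)
{
	unsigned int low_water = 0x5048, high_water = 0x5f00; /* example values */
	int tx_pause_allowed = 1, send_xon = 1;
	unsigned int fcrtl = 0, fcrth = 0;

	if (tx_pause_allowed) {
		fcrtl = low_water;
		if (send_xon)
			fcrtl |= 0x80000000u;  /* stand-in for IGC_FCRTL_XONE */
		fcrth = high_water;
	}
	printf("FCRTL = 0x%08x, FCRTH = 0x%08x\n", fcrtl, fcrth);
	return 0;
}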
+ */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. 
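/* Editorial sketch (not part of the patch): the mode-to-register mapping the
 * switch statement below performs - RFCE makes the MAC honour received pause
 * frames, TFCE lets it send them. Bit positions are assumed for illustration.
 */
#include <stdio.h>

#define CTRL_RFCE (1u << 27) /* stand-in for IGC_CTRL_RFCE (assumed) */
#define CTRL_TFCE (1u << 28) /* stand-in for IGC_CTRL_TFCE (assumed) */

enum mode { none, rx_pause, tx_pause, full };

int main(void)
{
	enum mode m = rx_pause;
	unsigned int ctrl = 0;

	ctrl &= ~(CTRL_RFCE | CTRL_TFCE);
	if (m == rx_pause || m == full)
		ctrl |= CTRL_RFCE;
	if (m == tx_pause || m == full)
		ctrl |= CTRL_TFCE;

	printf("CTRL flow-control bits: 0x%08x\n", ctrl);
	return 0;
}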
+ * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. 
+ */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. 
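/* Editorial sketch (not part of the patch): the IEEE 802.3 pause resolution
 * table that igc_config_fc_after_link_up() walks through below, reduced to a
 * helper. The enum names echo the driver's igc_fc_* spelling, but the helper
 * itself is illustrative and omits the later requested-mode downgrade.
 */
#include <stdio.h>

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static enum fc_mode resolve_fc(int l_pause, int l_asm, int p_pause, int p_asm)
{
	if (l_pause && p_pause)
		return fc_full;                 /* symmetric pause */
	if (!l_pause && l_asm && p_pause && p_asm)
		return fc_tx_pause;             /* we may only send pause */
	if (l_pause && l_asm && !p_pause && p_asm)
		return fc_rx_pause;             /* we may only honour pause */
	return fc_none;
}

int main(void)
{
	printf("%d %d %d\n",
	       resolve_fc(1, 1, 1, 0),   /* -> fc_full */
	       resolve_fc(0, 1, 1, 1),   /* -> fc_tx_pause */
	       resolve_fc(1, 1, 0, 1));  /* -> fc_rx_pause */
	return 0;
}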
+ */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. 
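+ *
+ * The loop below simply polls IGC_EECD for the IGC_EECD_AUTO_RD bit,
+ * sleeping 1-2 ms between reads, and gives up with -IGC_ERR_RESET if
+ * the bit is still clear after AUTO_READ_DONE_TIMEOUT iterations.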
+ */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. 
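+ *
+ * For example, with the 128-register MTA used in the worked example
+ * further down, hash_mask is 0xFFF and this loop stops at
+ * bit_shift = 4, since 0xFFF >> 4 == 0xFF.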
+ */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
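+ *
+ * Here "packed" means mc_addr_count consecutive ETH_ALEN-byte
+ * addresses stored back to back in one flat buffer: the update loop
+ * below advances mc_addr_list by ETH_ALEN after hashing each entry,
+ * so any per-entry padding would be hashed as address bytes.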
+ **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-5.14-ethercat.h b/devices/igc/igc_mac-5.14-ethercat.h new file mode 100644 index 00000000..b44ffe5a --- /dev/null +++ b/devices/igc/igc_mac-5.14-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw-5.14-ethercat.h" +#include "igc_phy-5.14-ethercat.h" +#include "igc_defines-5.14-ethercat.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_mac-5.14-orig.c b/devices/igc/igc_mac-5.14-orig.c new file mode 100644 index 00000000..67b8ffd2 --- /dev/null +++ b/devices/igc/igc_mac-5.14-orig.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include + +#include "igc_mac.h" +#include "igc_hw.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. 
+ */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. 
+ */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
+ * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-5.14-orig.h b/devices/igc/igc_mac-5.14-orig.h new file mode 100644 index 00000000..b5963f86 --- /dev/null +++ b/devices/igc/igc_mac-5.14-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw.h" +#include "igc_phy.h" +#include "igc_defines.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_mac-6.1-ethercat.c b/devices/igc/igc_mac-6.1-ethercat.c new file mode 100644 index 00000000..cb6b6870 --- /dev/null +++ b/devices/igc/igc_mac-6.1-ethercat.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: GPL-2.0 
+/* Copyright (c) 2018 Intel Corporation */ + +#include +#include + +#include "igc_mac-6.1-ethercat.h" +#include "igc_hw-6.1-ethercat.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. 
+ */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
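+ *
+ * These statistics registers are cleared by the read itself, which
+ * is why the rd32() return values below are discarded; one pass over
+ * the list is enough to zero the whole counter block.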
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
+ * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-6.1-ethercat.h b/devices/igc/igc_mac-6.1-ethercat.h new file mode 100644 index 00000000..2fa73377 --- /dev/null +++ b/devices/igc/igc_mac-6.1-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw-6.1-ethercat.h" +#include "igc_phy-6.1-ethercat.h" +#include "igc_defines-6.1-ethercat.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_mac-6.1-orig.c b/devices/igc/igc_mac-6.1-orig.c new file mode 100644 index 00000000..a5c4b19d --- /dev/null +++ b/devices/igc/igc_mac-6.1-orig.c @@ -0,0 +1,881 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include <linux/pci.h> +#include <linux/delay.h> + +#include "igc_mac.h" +#include "igc_hw.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again.
+ */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
+ * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-6.1-orig.h b/devices/igc/igc_mac-6.1-orig.h new file mode 100644 index 00000000..b5963f86 --- /dev/null +++ b/devices/igc/igc_mac-6.1-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw.h" +#include "igc_phy.h" +#include "igc_defines.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_main-5.14-ethercat.c b/devices/igc/igc_main-5.14-ethercat.c new file mode 100644 index 00000000..2dac8faf --- /dev/null +++ b/devices/igc/igc_main-5.14-ethercat.c @@ -0,0 +1,6734 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc-5.14-ethercat.h" +#include "igc_hw-5.14-ethercat.h" +#include "igc_tsn-5.14-ethercat.h" +#include "igc_xdp-5.14-ethercat.h" + +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver (EtherCAT enabled)" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "ec_igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +// MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_offload_apply(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + if (!get_ecdev(adapter)) { + /* skb is reused in EtherCAT TX operation */ + dev_kfree_skb_any(tx_buffer->skb); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + } + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + 
igc_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate 
memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + mdelay(10); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac_filter_hw() - Set MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be set + * @index: Filter index + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. 
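/* Minimal sketch of the read-modify-write pattern used by igc_setup_rctl()
 * above: clear the multicast-offset field and the loopback bits first, then
 * OR in the enable bits and the new field value.  The bit positions below
 * are illustrative placeholders, not the real IGC_RCTL_* layout.
 */
#include <stdint.h>
#include <stdio.h>

#define CTL_EN		(1u << 1)	/* placeholder enable bit */
#define CTL_BAM		(1u << 15)	/* placeholder broadcast-accept bit */
#define CTL_MO_SHIFT	12
#define CTL_MO_MASK	(3u << CTL_MO_SHIFT)

static uint32_t update_rctl(uint32_t rctl, uint32_t mc_filter_type)
{
	rctl &= ~CTL_MO_MASK;			/* clear the 2-bit field */
	rctl |= CTL_EN | CTL_BAM |		/* set the enables ...   */
		(mc_filter_type << CTL_MO_SHIFT); /* ... and the field   */
	return rctl;
}

int main(void)
{
	printf("rctl = 0x%08x\n", (unsigned)update_rctl(0, 0));
	return 0;
}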
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, addr->sa_data); +#else + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); +#endif + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
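/* Minimal sketch of how igc_set_mac_filter_hw() above packs a MAC address
 * into the RAL/RAH register pair: bytes 0-3 go into RAL and bytes 4-5 into
 * the low half of RAH (both little-endian), with flag bits such as the
 * "address valid" bit OR-ed into the upper half of RAH.  The AV bit
 * position used here is an illustrative placeholder for IGC_RAH_AV.
 */
#include <stdint.h>
#include <stdio.h>

#define RAH_AV (1u << 31)	/* placeholder for IGC_RAH_AV */

static void pack_mac_filter(const uint8_t mac[6], uint32_t *ral, uint32_t *rah)
{
	*ral = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
	       ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
	*rah = (uint32_t)mac[4] | ((uint32_t)mac[5] << 8);
	*rah |= RAH_AV;		/* mark the filter entry as valid */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t ral, rah;

	pack_mac_filter(mac, &ral, &rah);
	printf("RAL=0x%08x RAH=0x%08x\n", (unsigned)ral, (unsigned)rah);
	return 0;
}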
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) +{ + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + u32 launchtime; + + /* FIXME: when using ETF together with taprio, we may have a + * case where 'delta' is larger than the cycle_time, this may + * cause problems if we don't read the current value of + * IGC_BASET, as the value writen into the launchtime + * descriptor field may be misinterpreted. + */ + div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); + + return cpu_to_le32(launchtime); +} + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For i225, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid Tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + ktime_t txtime = first->skb->tstamp; + + skb_txtime_consumed(first->skb); + context_desc->launch_time = igc_tx_launchtime(adapter, + txtime); + } else { + context_desc->launch_time = 0; + } +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + struct igc_adapter *adapter = netdev_priv(netdev); + if (!get_ecdev(adapter)) { + netif_stop_subqueue(netdev, tx_ring->queue_index); + } + + /* memory barriier comment */ + smp_mb(); + + /* We need to check again in a case another CPU has 
just + * made room available. + */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + if (!get_ecdev(adapter)) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + } + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +#define IGC_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = 
IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (!get_ecdev(adapter)) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + 
(__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + struct igc_tx_buffer *first; + u32 tx_flags = 0; + unsigned short f; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(!get_ecdev(adapter) && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. 
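/* Minimal sketch of the descriptor budgeting done in igc_xmit_frame_ring()
 * above: each buffer needs ceil(len / MAX_DATA_PER_TXD) data descriptors,
 * plus one context descriptor and a two-descriptor gap so the tail never
 * touches the head.  MAX_DATA_PER_TXD = 32 KiB mirrors IGC_MAX_DATA_PER_TXD
 * but is an assumption in this standalone sketch.
 */
#include <stdio.h>

#define MAX_DATA_PER_TXD 32768u

static unsigned int txd_use_count(unsigned int len)
{
	return (len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD; /* ceil div */
}

int main(void)
{
	/* example frame: 1400-byte linear head plus one 60000-byte fragment */
	unsigned int need = txd_use_count(1400) + txd_use_count(60000);

	need += 3;	/* +1 context descriptor, +2 gap between tail and head */
	printf("descriptors needed: %u\n", need);	/* 1 + 2 + 3 = 6 */
	return 0;
}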
+ */ + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + if (!get_ecdev(adapter)) { + dev_kfree_skb_any(first->skb); + first->skb = NULL; + } + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = 
le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = ring_uses_build_skb(ring) ? + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + union igc_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(va); + + /* build an skb around the page buffer */ + skb = build_skb(va - IGC_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, IGC_SKB_PAD); + __skb_put(skb, size); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(va); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. 
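/* Minimal sketch of the half-page buffer flip performed by
 * igc_rx_buffer_flip() above on 4 KiB-page systems: with truesize equal to
 * half the page, XOR-ing the offset toggles between the two page halves, so
 * one half can sit in the stack while the other is reposted to hardware.
 */
#include <stdio.h>

#define PAGE_SIZE_EXAMPLE 4096u
#define TRUESIZE (PAGE_SIZE_EXAMPLE / 2)

int main(void)
{
	unsigned int page_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("use half at offset %u\n", page_offset);
		page_offset ^= TRUESIZE;	/* flip: 0 <-> 2048 */
	}
	return 0;
}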
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
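/* Minimal sketch of the reference-count bookkeeping behind
 * igc_can_reuse_rx_page() above: the driver takes a large up-front page
 * reference (USHRT_MAX) and pays it out through pagecnt_bias; the page may
 * only be recycled while the network stack holds at most one outstanding
 * reference, i.e. (page refcount - bias) <= 1.  Plain integers stand in for
 * struct page in this sketch.
 */
#include <stdio.h>

#define BIAS_MAX 65535u			/* stands in for USHRT_MAX */

struct fake_rx_page {
	unsigned int refcount;		/* page_count() stand-in */
	unsigned int pagecnt_bias;	/* driver's unspent references */
};

static int can_reuse(const struct fake_rx_page *p)
{
	return (p->refcount - p->pagecnt_bias) <= 1;
}

int main(void)
{
	struct fake_rx_page p = { .refcount = BIAS_MAX, .pagecnt_bias = BIAS_MAX };

	p.pagecnt_bias--;	/* one buffer handed to the stack */
	printf("reusable: %d\n", can_reuse(&p));	/* 1: one ref in flight */

	p.pagecnt_bias--;	/* second half also in flight */
	printf("reusable: %d\n", can_reuse(&p));	/* 0: two refs outstanding */
	return 0;
}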
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
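/* Minimal sketch of the biased-index trick used in the refill loop of
 * igc_alloc_rx_buffers() above: the u16 index is offset by -count, so the
 * ring's wrap point shows up as the index reaching zero, keeping the hot
 * loop free of an "i == count" comparison.  Ring size 8 is just an example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t count = 8;	/* ring entries */
	uint16_t next_to_use = 5;	/* start refilling here */
	uint16_t todo = 6;		/* buffers to post */
	uint16_t i = next_to_use;

	i -= count;			/* bias: 5 - 8 wraps to 65533 */
	do {
		/* ... post one buffer at the current position ... */
		i++;
		if (!i)			/* biased index hit 0: real index hit count */
			i -= count;	/* wrap back to the start of the ring */
		todo--;
	} while (todo);

	i += count;			/* remove the bias again */
	printf("next_to_use advances 5 -> %u\n", i);	/* (5 + 6) %% 8 = 3 */
	return 0;
}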
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, + struct xdp_frame *xdpf, + struct igc_ring *ring) +{ + dma_addr_t dma; + + dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->type = IGC_TX_BUFFER_TYPE_XDP; + buffer->xdpf = xdpf; + buffer->protocol = 0; + buffer->bytecount = xdpf->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, xdpf->len); + dma_unmap_addr_set(buffer, dma, dma); + return 0; +} + +/* This function requires __netif_tx_lock is held by the caller. 
*/ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct igc_tx_buffer *buffer; + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + buffer = &ring->tx_buffer_info[ring->next_to_use]; + err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + buffer->bytecount; + olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); + + netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); + + buffer->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. */ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); +#else + bpf_warn_invalid_xdp_action(act); +#endif + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. 
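/* Minimal sketch of the advanced Tx descriptor fields assembled by
 * igc_xdp_init_tx_descriptor() above: the buffer length occupies the low
 * bits of cmd_type_len with the command/type flags OR-ed in above it, and
 * the total payload length is shifted into olinfo_status.  The flag values
 * and the 14-bit PAYLEN shift are illustrative placeholders for the
 * IGC_ADVTXD_* constants.
 */
#include <stdint.h>
#include <stdio.h>

#define ADVTXD_DTYP_DATA	(0x3u << 20)	/* placeholder */
#define ADVTXD_DCMD_DEXT	(1u << 29)	/* placeholder */
#define ADVTXD_DCMD_IFCS	(1u << 25)	/* placeholder */
#define ADVTXD_PAYLEN_SHIFT	14		/* placeholder */

int main(void)
{
	uint32_t len = 1514;	/* one full-size Ethernet frame */
	uint32_t cmd_type_len = ADVTXD_DTYP_DATA | ADVTXD_DCMD_DEXT |
				ADVTXD_DCMD_IFCS | len;
	uint32_t olinfo_status = len << ADVTXD_PAYLEN_SHIFT;

	printf("cmd_type_len=0x%08x olinfo_status=0x%08x\n",
	       (unsigned)cmd_type_len, (unsigned)olinfo_status);
	return 0;
}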
+ */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + ktime_t timestamp = 0; + struct xdp_buff xdp; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, size, false); + + skb = igc_xdp_run_prog(adapter, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } + else if (get_ecdev(adapter)) { + unsigned char *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + ecdev_receive(get_ecdev(adapter), va, size); + adapter->ec_watchdog_jiffies = jiffies; + igc_reuse_rx_page(rx_ring, rx_buffer); + } + else { + if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; 
+ } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + } + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + + if (get_ecdev(adapter)) { + total_packets++; + continue; + } + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + unsigned int totalsize = metasize + datasize; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&ring->q_vector->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); + memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); + if (metasize) + skb_metadata_set(skb, metasize); + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, skb); +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. 
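/* Minimal sketch of the size arithmetic in igc_construct_skb_zc() above: an
 * XDP buffer is laid out as hard_start | headroom | data_meta | data |
 * data_end, the skb is sized to data_end - data_hard_start, the headroom up
 * to data_meta is reserved, and metadata plus payload are copied in one go.
 * Plain char pointers stand in for struct xdp_buff here.
 */
#include <stdio.h>

int main(void)
{
	char frame[2048];
	char *data_hard_start = frame;
	char *data_meta = frame + 248;	/* e.g. 8 bytes of XDP metadata */
	char *data = frame + 256;	/* packet starts after 256 B headroom */
	char *data_end = data + 1514;	/* full-size Ethernet frame */

	size_t metasize = (size_t)(data - data_meta);
	size_t datasize = (size_t)(data_end - data);
	size_t alloc	= (size_t)(data_end - data_hard_start);
	size_t reserve	= (size_t)(data_meta - data_hard_start);

	printf("alloc %zu, reserve %zu, copy %zu (%zu meta + %zu data)\n",
	       alloc, reserve, metasize + datasize, metasize, datasize);
	return 0;
}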
+ */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + u16 ntu = ring->next_to_use; + struct xdp_desc xdp_desc; + u16 budget; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring 
*tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + if (!get_ecdev(adapter)) { + napi_consume_skb(tx_buffer->skb, napi_budget); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (!get_ecdev(adapter)) { + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + } + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (!get_ecdev(adapter) && + test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + 
tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(!get_ecdev(adapter) && total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. 
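/* Minimal sketch of the slot-management pattern used by igc_add_mac_filter()
 * above: reuse the slot of an identical filter if one already exists,
 * otherwise take the first free slot, and fail (-ENOSPC) when the table is
 * full.  A small array of integers stands in for the RAL/RAH register pairs.
 */
#include <stdio.h>

#define NSLOTS	4
#define EMPTY	0		/* stands in for "AV bit clear" */

static int add_filter(int table[NSLOTS], int value)
{
	int i, free_slot = -1;

	for (i = 0; i < NSLOTS; i++) {
		if (table[i] == value)
			return i;		/* already programmed: reuse */
		if (table[i] == EMPTY && free_slot < 0)
			free_slot = i;		/* remember first free slot */
	}
	if (free_slot < 0)
		return -1;			/* table full (-ENOSPC) */
	table[free_slot] = value;
	return free_slot;
}

int main(void)
{
	int table[NSLOTS] = { 0 };

	printf("add 42 -> slot %d\n", add_filter(table, 42));	/* 0 */
	printf("add 43 -> slot %d\n", add_filter(table, 43));	/* 1 */
	printf("add 42 -> slot %d\n", add_filter(table, 42));	/* 0 again */
	return 0;
}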
+ */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
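+ *
+ * Illustrative usage (not taken from the original driver): calling
+ * igc_add_etype_filter(adapter, ETH_P_1588, 0) would match layer-2 PTP
+ * frames (ethertype 0x88F7) and steer them to queue 0, while a negative
+ * 'queue' only matches the ethertype without queue assignment.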
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + int err; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. 
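+ *
+ * Note: adapter->nfc_rule_list is kept sorted by ascending location (see
+ * igc_add_nfc_rule()), so the walk below can stop as soon as it passes
+ * the requested location.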
+ */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
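+ *
+ * If the multicast table cannot hold all requested addresses, multicast
+ * promiscuous mode (MPE) is enabled as a fallback; likewise, unicast
+ * promiscuous mode (UPE) is enabled when __dev_uc_sync() fails.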
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
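+ *
+ * Worked example (derived from igc_assign_vector() below): Rx queue 3 is
+ * written with index = 3 >> 1 = 1 and offset = (3 & 0x1) << 4 = 16, and
+ * the matching Tx queue 3 entry uses the same row at offset 16 + 8 = 24.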
+ */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + if (get_ecdev(adapter)) { + /* skip enabling interrupts */ + return; + } + + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (get_ecdev(adapter)) { + /* skip synchonizing IRQs */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
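+ *
+ * Worked example (illustrative numbers, following the switch below): in
+ * the low_latency state, 20 packets totalling 30000 bytes give
+ * bytes/packets = 1500 > 1200 and demote the ring to bulk_latency
+ * (~4000 ints/s), whereas 40 packets totalling 12000 bytes (more than 35
+ * packets) promote it to lowest_latency.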
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
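+	 * At gigabit speeds the heuristic below derives the value from the
+	 * average wire size instead; e.g. 600-byte frames (illustrative) give
+	 * new_val = (600 + 24) / 3 = 208.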
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (get_ecdev(q_vector->adapter)) + return -EBUSY; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igc_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * 
igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + if (!get_ecdev(adapter)) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!get_ecdev(adapter)) { + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = 
adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + if (!get_ecdev(adapter)) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (!get_ecdev(adapter) && adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + igc_regs_dump(adapter); + netdev_err(adapter->netdev, 
"Reset adapter\n"); + igc_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = 
sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + if (get_ecdev(adapter)) { + /* avoid requesting MSI-X. 
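+		 * When the device is claimed by the EtherCAT master, interrupts
+		 * stay masked (igc_irq_enable() returns early) and the rings are
+		 * driven by the master's polling instead, so no MSI-X vectors are
+		 * requested.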
*/ + return 0; + } + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case igc_media_type_copper: + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case igc_media_type_unknown: + break; + } + + if (hw->mac.type == igc_i225 && + hw->phy.id == I225_I_PHY_ID) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + if (get_ecdev(adapter)) + hw->mac.get_link_status = true; + + link = igc_has_link(adapter); + + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), link); + return; + } + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? 
*/ + break; + } + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGC_FLAG_MAS_ENABLE) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGC_FLAG_MAS_ENABLE)) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
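+			 * (igc_desc_unused() + 1 < count means descriptors are still outstanding, i.e. queued Tx work remains to be flushed.)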
+ */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (get_ecdev(adapter)) { + /* no IRQ to free in EtherCAT operation */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (!get_ecdev(adapter) && adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + if (!get_ecdev(adapter)) { + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + } +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
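+ * + * In EtherCAT operation (ecdev attached) the carrier state is reported via ecdev_set_link(), and the real queue count notification, NAPI, Tx queue start and watchdog kick are skipped, since the EtherCAT master drives the device through ec_poll() instead.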
+ */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); + } + else { + netif_carrier_off(netdev); + } + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + if (!get_ecdev(adapter)) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + clear_bit(__IGC_DOWN, &adapter->state); + if (!get_ecdev(adapter)) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + if (!get_ecdev(adapter)) { + netif_tx_start_all_queues(netdev); + } + + if (!get_ecdev(adapter)) { + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
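+ * + * igc_free_irq(), called below, is a no-op in EtherCAT operation, since no interrupt was requested in that mode.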
+ */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + int i; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + if (adapter->base_time) + return 0; + + adapter->cycle_time = NSEC_PER_SEC; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + } + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. + */ + if (!is_base_time_past(qopt->base_time, &now)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e; + int i; + + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. 
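+		 * Each Tx queue may also appear in at most one gate entry; schedules with overlapping gate masks are rejected by the loop below.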
+ */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (e->gate_mask & BIT(i)) + queue_uses[i]++; + + if (queue_uses[i] > 1) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + u32 start_time = 0, end_time = 0; + size_t n; + + if (!qopt->enable) { + adapter->base_time = 0; + return 0; + } + + if (adapter->base_time) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + + /* FIXME: be a little smarter about cases when the gate for a + * queue stays open for more than one entry. + */ + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + int i; + + end_time += e->interval; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + ring->start_time = start_time; + ring->end_time = end_time; + } + + start_time += e->interval; + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct 
igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct igc_ring *ring; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igc_xdp_is_enabled(adapter)) + return -ENXIO; + + if (queue_id >= adapter->num_rx_queues) + return -EINVAL; + + ring = adapter->rx_ring[queue_id]; + + if (!ring->xsk_pool) + return -ENXIO; + + q_vector = adapter->q_vector[queue_id]; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + igc_trigger_rxtxq_interrupt(adapter, q_vector); + + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_rx_mode = igc_set_rx_mode, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_get_stats64 = igc_get_stats64, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, + .ndo_do_ioctl = igc_ioctl, + .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, + .ndo_xsk_wakeup = igc_xsk_wakeup, +}; + +static void ec_kick_watchdog(struct irq_work *work) +{ + struct igc_adapter *adapter = + container_of(work, struct igc_adapter, ec_watchdog_kicker); + + schedule_work(&adapter->watchdog_task); +} + +/** +* ec_poll - EtherCAT poll routine +* @netdev: net device structure +* +* This function can never fail. +* +**/ +void ec_poll(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + int budget = 64; + + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + adapter->ec_watchdog_jiffies = jiffies; + irq_work_queue(&adapter->ec_watchdog_kicker); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + if (q_vector->tx.ring) { + igc_clean_tx_irq(q_vector, budget); + } + + if (q_vector->rx.ring) { + igc_clean_rx_irq(q_vector, budget); + } + } +} + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_read_word(adapter->pdev, reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_write_word(adapter->pdev, reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + 
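+		/* The WARN below only fires if the PCI device is still present, i.e. the all-ones read was not caused by a surprise removal. */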
WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) +{ + struct igc_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = false; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = true; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + goto err_inval; + case SPEED_2500 + DUPLEX_FULL: + mac->autoneg = true; + adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; + break; + case SPEED_2500 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. 
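+ * + * In this EtherCAT-capable variant the netdev is first offered to the EtherCAT master via ecdev_offer(); only when the master does not claim the device is it registered with the network stack via register_netdev().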
+ */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err, pci_using_dac; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ecdev_initialized = false; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= 
netdev->vlan_features; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(netdev, hw->mac.addr); +#else + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); +#endif + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
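+	 * If the EtherCAT master claims the device below but ecdev_open() fails, the offer is withdrawn again via ecdev_withdraw() before taking the error path.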
+ */ + igc_get_hw_control(adapter); + + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = true; + if (get_ecdev(adapter)) { + init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); + err = ecdev_open(get_ecdev(adapter)); + if (err) { + ecdev_withdraw(get_ecdev(adapter)); + goto err_register; + } + adapter->ec_watchdog_jiffies = jiffies; + } else { + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + irq_work_sync(&adapter->ec_watchdog_kicker); + ecdev_withdraw(get_ecdev(adapter)); + } + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + if (!get_ecdev(adapter)) { + unregister_netdev(netdev); + } + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol; + struct igc_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + bool wake; + + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + } else { + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igc_close(netdev, true); + } + + igc_ptp_suspend(adapter); + + igc_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_LU) + wufc &= ~IGC_WUFC_LNKC; + + if (wufc) { + igc_setup_rctl(adapter); + igc_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IGC_WUFC_MC) { + rctl = rd32(IGC_RCTL); + rctl |= IGC_RCTL_MPE; + wr32(IGC_RCTL, rctl); + } + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_ADVD3WUC; + wr32(IGC_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igc_disable_pcie_master(hw); + + wr32(IGC_WUC, IGC_WUC_PME_EN); + wr32(IGC_WUFC, wufc); + } else { + wr32(IGC_WUC, 0); + wr32(IGC_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igc_power_down_phy_copper_base(&adapter->hw); + else + igc_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int __maybe_unused igc_runtime_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 1); +} + +static void igc_deliver_wake_packet(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if (wupl == 0 || wupl > IGC_WUPM_BYTES) + return; + + skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igc_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
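+	 * On resume, an EtherCAT-claimed device is reopened via ecdev_open() instead of __igc_open()/netif_device_attach().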
+ */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + if (get_ecdev(adapter)) { + ecdev_open(get_ecdev(adapter)); + } else { + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + } + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. + **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. 
+ */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
+ */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_main-5.14-orig.c b/devices/igc/igc_main-5.14-orig.c new file mode 100644 index 00000000..78114e62 --- /dev/null +++ b/devices/igc/igc_main-5.14-orig.c @@ -0,0 +1,6564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc.h" +#include "igc_hw.h" +#include "igc_tsn.h" +#include "igc_xdp.h" + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_offload_apply(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + dev_kfree_skb_any(tx_buffer->skb); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * 
Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate 
memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + mdelay(10); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac_filter_hw() - Set MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be set + * @index: Filter index + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. 
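+ *
+ * The 6-byte address is split across the RAL/RAH register pair for the
+ * given index; RAH additionally carries the address-select (source vs.
+ * destination), the optional queue-select and the address-valid (AV) bits.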
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) +{ + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + u32 launchtime; + + /* FIXME: when using ETF together with taprio, we may have a + * case where 'delta' is larger than the cycle_time, this may + * cause problems if we don't read the current value of + * IGC_BASET, as the value writen into the launchtime + * descriptor field may be misinterpreted. + */ + div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); + + return cpu_to_le32(launchtime); +} + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For i225, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid Tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + ktime_t txtime = first->skb->tstamp; + + skb_txtime_consumed(first->skb); + context_desc->launch_time = igc_tx_launchtime(adapter, + txtime); + } else { + context_desc->launch_time = 0; + } +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* memory barriier comment */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. 
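+ * The smp_mb() above makes the stopped subqueue visible before the free
+ * descriptor count is re-read; igc_clean_tx_irq() issues a matching
+ * smp_mb() before it tests __netif_subqueue_stopped() and wakes the queue.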
+ */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +#define IGC_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + 
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, 
+ (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_tx_buffer *first; + u32 tx_flags = 0; + unsigned short f; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. + */ + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. 
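+ * (skb_padto() only zero-pads the buffer and does not update skb->len,
+ * hence skb->len is bumped to 17 by hand below.)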
+ */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. 
+ */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = ring_uses_build_skb(ring) ? + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + union igc_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(va); + + /* build an skb around the page buffer */ + skb = build_skb(va - IGC_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, IGC_SKB_PAD); + __skb_put(skb, size); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(va); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. 
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
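+ * The packet address is recomputed from dma + page_offset because the
+ * offset may have been flipped or advanced when the buffer was reused.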
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer, + struct xdp_frame *xdpf, + struct igc_ring *ring) +{ + dma_addr_t dma; + + dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->type = IGC_TX_BUFFER_TYPE_XDP; + buffer->xdpf = xdpf; + buffer->protocol = 0; + buffer->bytecount = xdpf->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, xdpf->len); + dma_unmap_addr_set(buffer, dma, dma); + return 0; +} + +/* This function requires __netif_tx_lock is held by the caller. 
*/ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct igc_tx_buffer *buffer; + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + buffer = &ring->tx_buffer_info[ring->next_to_use]; + err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + buffer->bytecount; + olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); + + netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount); + + buffer->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. */ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. 
+ */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + ktime_t timestamp = 0; + struct xdp_buff xdp; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, size, false); + + skb = igc_xdp_run_prog(adapter, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } else if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + 
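+ /* The frame is complete at this point: EOP was reached and the headers
+ * passed igc_cleanup_headers(), so account it and hand it to GRO.
+ */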
+ /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + unsigned int totalsize = metasize + datasize; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&ring->q_vector->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); + memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); + if (metasize) + skb_metadata_set(skb, metasize); + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, skb); +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. 
+ */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + u16 ntu = ring->next_to_use; + struct xdp_desc xdp_desc; + u16 budget; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring 
*tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + napi_consume_skb(tx_buffer->skb, napi_budget); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + 
netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. 
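+ * Entry 0 always holds the adapter's own MAC address (see
+ * igc_set_default_mac_filter()), so only the queue assignment is reset.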
+ */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + int err; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. 
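+ * Note: adapter->nfc_rule_list is kept sorted by location (igc_add_nfc_rule()
+ * inserts rules in order), which is what allows the early break in the lookup
+ * loop below.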
+ */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
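+ * For the i225, each IVAR register therefore holds the mapping for two
+ * queues: igc_assign_vector() passes index = queue >> 1 and offset
+ * (queue & 0x1) << 4 for Rx, plus 8 for the corresponding Tx entry.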
+ */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
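+ * The multiqueue case is handled by igc_update_ring_itr() below;
+ * igc_ring_irq_enable() selects between the two based on the number of
+ * queue vectors in use.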
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
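+ * The deferred write is done by igc_write_itr(), which the interrupt
+ * handlers call before scheduling NAPI.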
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igc_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * 
igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
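+ * (adapter->link_speed is cleared in igc_down(), so this check also skips
+ * updates while the interface is down.)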
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast 
= adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + igc_regs_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igc_reinit_locked(adapter); + 
rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = 
sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
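+ * MSI-X entry 0 is used for the non-queue (link/other) interrupt handled by
+ * igc_msix_other(); the queue vectors are attached to entries 1..N.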
+ */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case igc_media_type_copper: + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case igc_media_type_unknown: + break; + } + + if (hw->mac.type == igc_i225 && + hw->phy.id == I225_I_PHY_ID) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + link = igc_has_link(adapter); + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? 
*/ + break; + } + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGC_FLAG_MAS_ENABLE) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGC_FLAG_MAS_ENABLE)) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
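+ * (If the ring were empty, igc_desc_unused() should return count - 1, so the
+ * check below is expected to trigger only while descriptors are still
+ * outstanding.)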
+ */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + netif_tx_start_all_queues(netdev); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + int i; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + if (adapter->base_time) + return 0; + + adapter->cycle_time = NSEC_PER_SEC; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + } + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. 
+ */ + if (!is_base_time_past(qopt->base_time, &now)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e; + int i; + + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. + */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (e->gate_mask & BIT(i)) + queue_uses[i]++; + + if (queue_uses[i] > 1) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + u32 start_time = 0, end_time = 0; + size_t n; + + if (!qopt->enable) { + adapter->base_time = 0; + return 0; + } + + if (adapter->base_time) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + + /* FIXME: be a little smarter about cases when the gate for a + * queue stays open for more than one entry. + */ + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + int i; + + end_time += e->interval; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + ring->start_time = start_time; + ring->end_time = end_time; + } + + start_time += e->interval; + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + 
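+			/* Descriptor setup failed for this frame: give it back
+			 * to the XDP memory allocator and count it as a drop,
+			 * so the value returned below only covers frames that
+			 * were actually queued for transmission.
+			 */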
xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct igc_ring *ring; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igc_xdp_is_enabled(adapter)) + return -ENXIO; + + if (queue_id >= adapter->num_rx_queues) + return -EINVAL; + + ring = adapter->rx_ring[queue_id]; + + if (!ring->xsk_pool) + return -ENXIO; + + q_vector = adapter->q_vector[queue_id]; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + igc_trigger_rxtxq_interrupt(adapter, q_vector); + + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_rx_mode = igc_set_rx_mode, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_get_stats64 = igc_get_stats64, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, + .ndo_do_ioctl = igc_ioctl, + .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, + .ndo_xsk_wakeup = igc_xsk_wakeup, +}; + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_read_word(adapter->pdev, reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_write_word(adapter->pdev, reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) +{ + struct igc_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = false; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; 
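+		/* The spd + dplx sums used as case labels are unique here
+		 * because the check above forces an even speed value
+		 * (10/100/1000/2500) and a duplex value of 0 or 1
+		 * (DUPLEX_HALF/DUPLEX_FULL), e.g. SPEED_100 + DUPLEX_FULL
+		 * gives 101 and cannot collide with any other combination.
+		 */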
+ break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = true; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + goto err_inval; + case SPEED_2500 + DUPLEX_FULL: + mac->autoneg = true; + adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; + break; + case SPEED_2500 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. + */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err, pci_using_dac; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + 
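+	/* These are the default features; they are additionally copied into
+	 * hw_features further down, so they stay user-toggleable via ethtool.
+	 */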
netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + unregister_netdev(netdev); + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol; + struct igc_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igc_close(netdev, true); + + igc_ptp_suspend(adapter); + + igc_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_LU) + wufc &= ~IGC_WUFC_LNKC; + + if (wufc) { + igc_setup_rctl(adapter); + igc_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IGC_WUFC_MC) { + rctl = rd32(IGC_RCTL); + rctl |= IGC_RCTL_MPE; + wr32(IGC_RCTL, rctl); + } + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_ADVD3WUC; + wr32(IGC_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igc_disable_pcie_master(hw); + + wr32(IGC_WUC, IGC_WUC_PME_EN); + wr32(IGC_WUFC, wufc); + } else { + wr32(IGC_WUC, 0); + wr32(IGC_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igc_power_down_phy_copper_base(&adapter->hw); + else + igc_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int __maybe_unused igc_runtime_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 1); +} + +static void igc_deliver_wake_packet(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if (wupl == 0 || wupl > IGC_WUPM_BYTES) + return; + + skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igc_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. + **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. + */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_main-6.1-ethercat.c b/devices/igc/igc_main-6.1-ethercat.c new file mode 100644 index 00000000..daaa7f17 --- /dev/null +++ b/devices/igc/igc_main-6.1-ethercat.c @@ -0,0 +1,7300 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "igc-6.1-ethercat.h" +#include "igc_hw-6.1-ethercat.h" +#include "igc_tsn-6.1-ethercat.h" +#include "igc_xdp-6.1-ethercat.h" + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver (EtherCAT enabled)" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "ec_igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +// MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_reset(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + if (!get_ecdev(adapter)) { + /* skb is reused in EtherCAT TX operation */ + dev_kfree_skb_any(tx_buffer->skb); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + } + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_disable_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board 
private structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + 
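+	/* Allocation failed: undo the XDP RX-queue registration performed
+	 * above and release the buffer array before returning -ENOMEM.
+	 */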
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + mdelay(10); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac_filter_hw() - Set MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be set + * @index: Filter index + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. 
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) +{ + struct igc_adapter *adapter = netdev_priv(ring->netdev); + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; + u32 launchtime; + s64 n; + + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(txtime, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. + */ + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; + + return cpu_to_le32(launchtime); +} + +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + __le32 launch_time, bool first_flag, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For i225, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->launch_time = launch_time; +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + struct igc_adapter *adapter = netdev_priv(netdev); + if (!get_ecdev(adapter)) { + netif_stop_subqueue(netdev, tx_ring->queue_index); + } + + /* memory barrier: order the queue stop against the re-check below */ + smp_mb(); + + /* We need to check again in case another CPU has just + * made room available. + */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + if (!get_ecdev(adapter)) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + } + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +#define IGC_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ?
\ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * 
are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (!get_ecdev(adapter)) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = 
l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + bool first_flag = false, insert_empty = false; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + struct igc_tx_buffer *first; + __le32 launch_time = 0; + u32 tx_flags = 0; + unsigned short f; + ktime_t txtime; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 5)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(!get_ecdev(adapter) && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. 
+ */ + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first, launch_time, first_flag); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + if (!get_ecdev(adapter)) { + dev_kfree_skb_any(first->skb); + first->skb = NULL; + } + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force 
__be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = ring_uses_build_skb(ring) ? + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. 
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 count, index = ring->next_to_use; + struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; + struct igc_tx_buffer *buffer = head; + union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); + u32 olinfo_status, len = xdpf->len, cmd_type; + void *data = xdpf->data; + u16 i; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igc_maybe_stop_tx(ring, count + 3)) { + /* this is a hard error */ + return -EBUSY; + } + + i = 0; + head->bytecount = xdp_get_frame_len(xdpf); + head->type = IGC_TX_BUFFER_TYPE_XDP; + head->gso_segs = 1; + head->xdpf = xdpf; + + olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, + "Failed to map DMA for TX\n"); + goto unmap; + } + + dma_unmap_len_set(buffer, len, len); + dma_unmap_addr_set(buffer, dma, dma); + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | len; + + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.buffer_addr = cpu_to_le64(dma); + + buffer->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + buffer = &ring->tx_buffer_info[index]; + desc = IGC_TX_DESC(ring, index); + desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(ring), head->bytecount); + /* set the timestamp */ + head->time_stamp = jiffies; + /* set next_to_watch value indicating a packet is present */ + head->next_to_watch = desc; + ring->next_to_use = index; + + return 0; + +unmap: + for (;;) { + buffer = &ring->tx_buffer_info[index]; + if (dma_unmap_len(buffer, len)) + dma_unmap_page(ring->dev, + dma_unmap_addr(buffer, dma), + dma_unmap_len(buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(buffer, len, 0); + if (buffer == head) + break; + + if (!index) + index += ring->count; + index--; + } + + return -ENOMEM; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. 
*/ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + ktime_t timestamp = 0; + struct xdp_buff xdp; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, + size, true); + xdp_buff_clear_frags_flag(&xdp); + + skb = igc_xdp_run_prog(adapter, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } + else if (get_ecdev(adapter)) { + unsigned char *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + ecdev_receive(get_ecdev(adapter), va, size); + adapter->ec_watchdog_jiffies = jiffies; + igc_reuse_rx_page(rx_ring, rx_buffer); + } + else { + if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, &xdp); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + } + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + + if (get_ecdev(adapter)) { + total_packets++; + continue; + } + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, skb); +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector 
*q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. + */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? 
budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + u16 ntu = ring->next_to_use; + struct xdp_desc xdp_desc; + u16 budget; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring *tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case 
IGC_TX_BUFFER_TYPE_SKB: + if (!get_ecdev(adapter)) { + napi_consume_skb(tx_buffer->skb, napi_budget); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (!get_ecdev(adapter)) { + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + } + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (!get_ecdev(adapter) && + test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != + readl(tx_ring->tail))) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(!get_ecdev(adapter) && total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. + */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? 
"dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. 
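 * [Editor's note -- layout sketch derived from the write loop below, not
 *  part of the patch. Each of the 16 rows in the host filter table covers
 *  8 pattern bytes plus one mask byte:
 *
 *      row i:  fhft + i*16 + 0  <- data[i*8 + 0 .. i*8 + 3]   (dw0)
 *              fhft + i*16 + 4  <- data[i*8 + 4 .. i*8 + 7]   (dw1)
 *              fhft + i*16 + 8  <- mask[i] in bits 7:0
 *
 *  so the 16 iterations describe the full 128-byte pattern and 128-bit
 *  mask, and offset 0xFC holds the length/queue/priority word.]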
+ */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 
18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. */ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + int err; + + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & 
IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. + */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
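 * [Editor's note -- summary of the decisions taken below, not part of the
 *  patch:
 *
 *      IFF_PROMISC set             -> RCTL gets UPE | MPE
 *      IFF_ALLMULTI set            -> RCTL gets MPE
 *      MTA write of mc list fails  -> fall back to MPE
 *      unicast list overflows RAR  -> fall back to UPE
 *
 *  all other RCTL bits are preserved via the read-modify-write of
 *  IGC_RCTL.]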
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
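 * [Editor's note -- worked example, not part of the patch. With the i225
 *  mapping used by igc_assign_vector() below, Rx queue 3 is programmed at
 *  IVAR index 3 >> 1 = 1, bit offset (3 & 1) << 4 = 16, and the matching
 *  Tx queue at offset 16 + 8 = 24, so each 32-bit IVAR entry holds four
 *  8-bit fields: the Rx and Tx causes for two consecutive queues.]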
+ */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + if (get_ecdev(adapter)) { + /* skip enabling interrupts */ + return; + } + + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (get_ecdev(adapter)) { + /* skip synchonizing IRQs */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
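 * [Editor's note -- teardown order as used by igc_free_q_vectors() further
 *  down; illustrative only, not part of the patch:
 *
 *      while (v_idx--) {
 *              igc_reset_q_vector(adapter, v_idx);   (unhook rings, NAPI)
 *              igc_free_q_vector(adapter, v_idx);    (kfree_rcu afterwards)
 *      }
 *
 *  so ring pointers are cleared before the q_vector memory is handed to
 *  RCU for freeing.]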
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
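 * [Editor's note -- rough shape of the transitions implemented below, not
 *  part of the patch (see the switch statement for the exact thresholds):
 *
 *      lowest_latency -> bulk_latency    very large packets (> 8000 B avg)
 *      lowest_latency -> low_latency     few packets but > 512 bytes
 *      low_latency    -> bulk_latency    high byte counts or large packets
 *      low_latency    -> lowest_latency  many small packets, or nearly idle
 *      bulk_latency   -> low_latency     > 25000 bytes with many packets,
 *                                        or < 1500 bytes]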
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
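 * [Editor's note -- worked example for the heuristic below, not part of
 *  the patch. An average wire size of 1000 bytes gives
 *
 *      avg_wire_size = 1000 + 24 = 1024   ->  new_val = 1024 / 3 = 341
 *
 *  (the mid-size "boost" branch), while a 64-byte average gives
 *  (64 + 24) / 2 = 44, which is then raised to IGC_20K_ITR when the
 *  conservative itr setting (3) is configured.]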
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (get_ecdev(q_vector->adapter)) + return -EBUSY; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * igc_alloc_q_vectors - Allocate memory for 
interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
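 * [Editor's note -- worked example, not part of the patch. With the
 *  default MTU of 1500 the body below sets
 *
 *      max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4)
 *                            + VLAN_HLEN (4)            = 1522 bytes
 *      min_frame_size = ETH_ZLEN (60) + ETH_FCS_LEN (4) =   64 bytes]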
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + if (!get_ecdev(adapter)) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!get_ecdev(adapter)) { + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = 
adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + if (!get_ecdev(adapter)) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (!get_ecdev(adapter) && adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + igc_regs_dump(adapter); + netdev_err(adapter->netdev, 
"Reset adapter\n"); + igc_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = 
sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + if (get_ecdev(adapter)) { + /* avoid requesting MSI-X. 
*/ + return 0; + } + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + if (get_ecdev(adapter)) + hw->mac.get_link_status = true; + + link = igc_has_link(adapter); + + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), link); + return; + } + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 1; + break; + } + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGC_FLAG_MAS_ENABLE) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGC_FLAG_MAS_ENABLE)) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (get_ecdev(adapter)) { + /* no IRQ to free in EtherCAT operation */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (!get_ecdev(adapter) && adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + if (!get_ecdev(adapter)) { + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + } +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); + } + else { + netif_carrier_off(netdev); + } + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + if (!get_ecdev(adapter)) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + clear_bit(__IGC_DOWN, &adapter->state); + if (!get_ecdev(adapter)) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + if (!get_ecdev(adapter)) { + netif_tx_start_all_queues(netdev); + } + + if (!get_ecdev(adapter)) { + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. + */ + if (!is_base_time_past(qopt->base_time, &now)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e, *prev; + int i; + + prev = n ? &qopt->entries[n - 1] : NULL; + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. + */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { + queue_uses[i]++; + + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + } + + return 0; +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + u32 start_time = 0, end_time = 0; + size_t n; + int i; + + adapter->qbv_enable = qopt->enable; + + if (!qopt->enable) + return igc_tsn_clear_schedule(adapter); + + if (qopt->base_time < 0) + return -ERANGE; + + if (adapter->base_time) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + + end_time += e->interval; + + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; + ring->end_time = end_time; + + queue_configured[i] = true; + } + + start_time += e->interval; + } + + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (!queue_configured[i]) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. 
+ * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. + */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct igc_ring *ring; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igc_xdp_is_enabled(adapter)) + return -ENXIO; + + if (queue_id >= 
adapter->num_rx_queues) + return -EINVAL; + + ring = adapter->rx_ring[queue_id]; + + if (!ring->xsk_pool) + return -ENXIO; + + q_vector = adapter->q_vector[queue_id]; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + igc_trigger_rxtxq_interrupt(adapter, q_vector); + + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_rx_mode = igc_set_rx_mode, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_tx_timeout = igc_tx_timeout, + .ndo_get_stats64 = igc_get_stats64, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, + .ndo_eth_ioctl = igc_ioctl, + .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, + .ndo_xsk_wakeup = igc_xsk_wakeup, +}; + +static void ec_kick_watchdog(struct irq_work *work) +{ + struct igc_adapter *adapter = + container_of(work, struct igc_adapter, ec_watchdog_kicker); + + schedule_work(&adapter->watchdog_task); +} + +/** + * ec_poll - EtherCAT poll routine + * @netdev: net device structure + * + * This function can never fail. + */ +void ec_poll(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + int budget = 64; + + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + adapter->ec_watchdog_jiffies = jiffies; + irq_work_queue(&adapter->ec_watchdog_kicker); + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + if (q_vector->tx.ring) { + igc_clean_tx_irq(q_vector, budget); + } + + if (q_vector->rx.ring) { + igc_clean_rx_irq(q_vector, budget); + } + } +} + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_read_word(adapter->pdev, reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_write_word(adapter->pdev, reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (IGC_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. + */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + 
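/* offloads advertised for encapsulated (tunnelled) traffic mirror the VLAN feature set */ +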
netdev->hw_enc_features |= netdev->vlan_features; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + igc_tsn_clear_schedule(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = true; + if (get_ecdev(adapter)) { + init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); + err = ecdev_open(get_ecdev(adapter)); + if (err) { + ecdev_withdraw(get_ecdev(adapter)); + goto err_register; + } + adapter->ec_watchdog_jiffies = jiffies; + } else { + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + irq_work_sync(&adapter->ec_watchdog_kicker); + ecdev_withdraw(get_ecdev(adapter)); + } + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + pci_disable_ptm(pdev); + pci_clear_master(pdev); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + if (!get_ecdev(adapter)) { + unregister_netdev(netdev); + } + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol; + struct igc_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + bool wake; + + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + } else { + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igc_close(netdev, true); + } + + igc_ptp_suspend(adapter); + + igc_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_LU) + wufc &= ~IGC_WUFC_LNKC; + + if (wufc) { + igc_setup_rctl(adapter); + igc_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IGC_WUFC_MC) { + rctl = rd32(IGC_RCTL); + rctl |= IGC_RCTL_MPE; + wr32(IGC_RCTL, rctl); + } + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_ADVD3WUC; + wr32(IGC_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igc_disable_pcie_master(hw); + + wr32(IGC_WUC, IGC_WUC_PME_EN); + wr32(IGC_WUFC, wufc); + } else { + wr32(IGC_WUC, 0); + wr32(IGC_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igc_power_down_phy_copper_base(&adapter->hw); + else + igc_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int __maybe_unused igc_runtime_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 1); +} + +static void igc_deliver_wake_packet(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if (wupl == 0 || wupl > IGC_WUPM_BYTES) + return; + + skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igc_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + if (get_ecdev(adapter)) { + ecdev_open(get_ecdev(adapter)); + } else { + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + } + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. + **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. 
+ */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
+ */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_main-6.1-orig.c b/devices/igc/igc_main-6.1-orig.c new file mode 100644 index 00000000..3509974c --- /dev/null +++ b/devices/igc/igc_main-6.1-orig.c @@ -0,0 +1,7153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "igc.h" +#include "igc_hw.h" +#include "igc_tsn.h" +#include "igc_xdp.h" + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_reset(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + dev_kfree_skb_any(tx_buffer->skb); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_disable_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if 
(adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
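The ring sizing in igc_setup_tx_resources() above multiplies the descriptor count by the descriptor size and then rounds the result up to a 4 KiB boundary. A minimal standalone sketch of that arithmetic, assuming 16-byte advanced descriptors and example ring counts (ALIGN_UP stands in for the kernel's ALIGN macro):

    /* build: cc -o ring_size ring_size.c */
    #include <stdio.h>
    #include <stddef.h>

    /* Round x up to the next multiple of a (a is a power of two),
     * like the kernel's ALIGN() macro. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
        size_t desc_size = 16;   /* assumed: one advanced descriptor is 16 bytes */
        size_t counts[] = { 256, 1000, 4096 };

        for (size_t i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
            size_t raw = counts[i] * desc_size;
            size_t aligned = ALIGN_UP(raw, 4096);
            printf("count=%4zu  raw=%6zu bytes  ring size=%6zu bytes\n",
                   counts[i], raw, aligned);
        }
        return 0;
    }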
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + 
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + mdelay(10); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
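The indirection-table fill in igc_setup_mrqc() above spreads the RETA entries evenly across the active RSS queues using plain integer arithmetic. A standalone sketch of that distribution, assuming a 128-entry table (the value IGC_RETA_SIZE is taken to have here) and four example queues:

    /* build: cc -o reta reta.c */
    #include <stdio.h>

    #define RETA_SIZE 128   /* assumed table size; the driver uses IGC_RETA_SIZE */

    int main(void)
    {
        unsigned int num_rx_queues = 4;   /* example queue count */
        unsigned int hist[8] = { 0 };

        for (unsigned int j = 0; j < RETA_SIZE; j++) {
            unsigned int q = (j * num_rx_queues) / RETA_SIZE;  /* same formula as above */
            hist[q]++;
        }

        /* Every queue ends up with an equal share of the table entries. */
        for (unsigned int q = 0; q < num_rx_queues; q++)
            printf("queue %u: %u RETA entries\n", q, hist[q]);
        return 0;
    }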
No need to enable TCP/UDP/IP checksum
+     * offloads as they are enabled by default
+     */
+    rxcsum = rd32(IGC_RXCSUM);
+    rxcsum |= IGC_RXCSUM_PCSD;
+
+    /* Enable Receive Checksum Offload for SCTP */
+    rxcsum |= IGC_RXCSUM_CRCOFL;
+
+    /* Don't need to set TUOFL or IPOFL, they default to 1 */
+    wr32(IGC_RXCSUM, rxcsum);
+
+    /* Generate RSS hash based on packet types, TCP/UDP
+     * port numbers and/or IPv4/v6 src and dst addresses
+     */
+    mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
+           IGC_MRQC_RSS_FIELD_IPV4_TCP |
+           IGC_MRQC_RSS_FIELD_IPV6 |
+           IGC_MRQC_RSS_FIELD_IPV6_TCP |
+           IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+    if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+        mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+    if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+        mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+    mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
+
+    wr32(IGC_MRQC, mrqc);
+}
+
+/**
+ * igc_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_rctl(struct igc_adapter *adapter)
+{
+    struct igc_hw *hw = &adapter->hw;
+    u32 rctl;
+
+    rctl = rd32(IGC_RCTL);
+
+    rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
+    rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
+
+    rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
+            (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
+
+    /* enable stripping of CRC. Newer features require
+     * that the HW strips the CRC.
+     */
+    rctl |= IGC_RCTL_SECRC;
+
+    /* disable store bad packets and clear size bits. */
+    rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
+
+    /* enable LPE to allow for reception of jumbo frames */
+    rctl |= IGC_RCTL_LPE;
+
+    /* disable queue 0 to prevent tail write w/o re-config */
+    wr32(IGC_RXDCTL(0), 0);
+
+    /* This is useful for sniffing bad packets. */
+    if (adapter->netdev->features & NETIF_F_RXALL) {
+        /* UPE and MPE will be handled by normal PROMISC logic
+         * in set_rx_mode
+         */
+        rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
+                 IGC_RCTL_BAM | /* RX All Bcast Pkts */
+                 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+        rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
+                  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
+    }
+
+    wr32(IGC_RCTL, rctl);
+}
+
+/**
+ * igc_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_tctl(struct igc_adapter *adapter)
+{
+    struct igc_hw *hw = &adapter->hw;
+    u32 tctl;
+
+    /* disable queue 0 which could be enabled by default */
+    wr32(IGC_TXDCTL(0), 0);
+
+    /* Program the Transmit Control Register */
+    tctl = rd32(IGC_TCTL);
+    tctl &= ~IGC_TCTL_CT;
+    tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
+            (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
+
+    /* Enable transmits */
+    tctl |= IGC_TCTL_EN;
+
+    wr32(IGC_TCTL, tctl);
+}
+
+/**
+ * igc_set_mac_filter_hw() - Set MAC address filter in hardware
+ * @adapter: Pointer to adapter where the filter should be set
+ * @index: Filter index
+ * @type: MAC address filter type (source or destination)
+ * @addr: MAC address
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ *         matching the filter are enqueued onto 'queue'. Otherwise, queue
+ *         assignment is disabled.
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
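igc_set_mac_filter_hw() above packs the six address bytes into the RAL/RAH register pair: the low four bytes go into RAL and the remaining two into the low half of RAH, both read little-endian, before the control bits are OR-ed in. A standalone sketch with an illustrative address (the position of the "address valid" bit is an assumption, not taken from the patch):

    /* build: cc -o ral_rah ral_rah.c */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* example address 00:1b:21:aa:bb:cc, illustrative only */
        uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

        /* First four bytes into RAL, last two into RAH, little-endian,
         * mirroring the le32/le16 loads in igc_set_mac_filter_hw(). */
        uint32_t ral = (uint32_t)addr[0] | (uint32_t)addr[1] << 8 |
                       (uint32_t)addr[2] << 16 | (uint32_t)addr[3] << 24;
        uint32_t rah = (uint32_t)addr[4] | (uint32_t)addr[5] << 8;

        rah |= 1u << 31;   /* assumed bit position for "address valid" (AV) */

        printf("RAL = 0x%08x\n", (unsigned int)ral);
        printf("RAH = 0x%08x\n", (unsigned int)rah);
        return 0;
    }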
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) +{ + struct igc_adapter *adapter = netdev_priv(ring->netdev); + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; + u32 launchtime; + s64 n; + + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(txtime, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. + */ + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; + + return cpu_to_le32(launchtime); +} + +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + __le32 launch_time, bool first_flag, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0;
+
+    /* set bits to identify this as an advanced context descriptor */
+    type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
+
+    /* For i225, context index must be unique per ring. */
+    if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+        mss_l4len_idx |= tx_ring->reg_idx << 4;
+
+    if (first_flag)
+        mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
+    context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+    context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+    context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+    context_desc->launch_time = launch_time;
+}
+
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+                        __le32 launch_time, bool first_flag)
+{
+    struct sk_buff *skb = first->skb;
+    u32 vlan_macip_lens = 0;
+    u32 type_tucmd = 0;
+
+    if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
+        if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
+            !tx_ring->launchtime_enable)
+            return;
+        goto no_csum;
+    }
+
+    switch (skb->csum_offset) {
+    case offsetof(struct tcphdr, check):
+        type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
+        fallthrough;
+    case offsetof(struct udphdr, check):
+        break;
+    case offsetof(struct sctphdr, checksum):
+        /* validate that this is actually an SCTP request */
+        if (skb_csum_is_sctp(skb)) {
+            type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
+            break;
+        }
+        fallthrough;
+    default:
+        skb_checksum_help(skb);
+        goto csum_failed;
+    }
+
+    /* update TX checksum flag */
+    first->tx_flags |= IGC_TX_FLAGS_CSUM;
+    vlan_macip_lens = skb_checksum_start_offset(skb) -
+                      skb_network_offset(skb);
+no_csum:
+    vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
+    vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+    igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+                    vlan_macip_lens, type_tucmd, 0);
+}
+
+static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+{
+    struct net_device *netdev = tx_ring->netdev;
+
+    netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+    /* Memory barrier: make the stopped queue visible before re-checking
+     * for free descriptors below.
+     */
+    smp_mb();
+
+    /* We need to check again in case another CPU has just
+     * made room available.
+     */
+    if (igc_desc_unused(tx_ring) < size)
+        return -EBUSY;
+
+    /* A reprieve! */
+    netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+    u64_stats_update_begin(&tx_ring->tx_syncp2);
+    tx_ring->tx_stats.restart_queue2++;
+    u64_stats_update_end(&tx_ring->tx_syncp2);
+
+    return 0;
+}
+
+static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+{
+    if (igc_desc_unused(tx_ring) >= size)
+        return 0;
+    return __igc_maybe_stop_tx(tx_ring, size);
+}
+
+#define IGC_SET_FLAG(_input, _flag, _result) \
+    (((_flag) <= (_result)) ?
\ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. 
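IGC_SET_FLAG() above translates a flag bit in one word into a differently positioned bit in another word by multiplying or dividing with the ratio of the two masks, which works whenever both masks are single bits. A standalone sketch with made-up mask values (the real IGC_TX_FLAGS_* and IGC_ADVTXD_* constants are not shown in this hunk):

    /* build: cc -o set_flag set_flag.c */
    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as IGC_SET_FLAG(): scale a set bit from one position
     * to another via the ratio of the two power-of-two masks. */
    #define SET_FLAG(in, flag, result) \
        (((flag) <= (result)) ? \
         ((uint32_t)((in) & (flag)) * ((result) / (flag))) : \
         ((uint32_t)((in) & (flag)) / ((flag) / (result))))

    int main(void)
    {
        uint32_t TX_FLAGS_VLAN   = 0x00000002;  /* example software flag bit */
        uint32_t ADVTXD_DCMD_VLE = 0x40000000;  /* example descriptor command bit */

        uint32_t tx_flags = TX_FLAGS_VLAN;      /* VLAN flag set */
        uint32_t cmd = SET_FLAG(tx_flags, TX_FLAGS_VLAN, ADVTXD_DCMD_VLE);
        printf("tx_flags=0x%08x -> cmd bit=0x%08x\n",
               (unsigned int)tx_flags, (unsigned int)cmd);

        tx_flags = 0;                           /* VLAN flag clear */
        cmd = SET_FLAG(tx_flags, TX_FLAGS_VLAN, ADVTXD_DCMD_VLE);
        printf("tx_flags=0x%08x -> cmd bit=0x%08x\n",
               (unsigned int)tx_flags, (unsigned int)cmd);
        return 0;
    }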
(Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << 
IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + bool first_flag = false, insert_empty = false; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_tx_buffer *first; + __le32 launch_time = 0; + u32 tx_flags = 0; + unsigned short f; + ktime_t txtime; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 5)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. 
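igc_tx_launchtime(), used by igc_xmit_frame_ring() above, reduces the requested transmit time to an offset within the current Qbv cycle: it counts the whole cycles elapsed since base_time, derives the current cycle start and end, and takes the offset of txtime from the cycle start modulo the cycle time. A standalone sketch with plain 64-bit nanoseconds and example numbers (the first_flag/insert_empty bookkeeping and the 5 us end-of-cycle warning are left out):

    /* build: cc -o launchtime launchtime.c */
    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_MSEC 1000000LL

    int main(void)
    {
        /* example Qbv schedule: cycle starts at t=0, 1 ms cycle time */
        int64_t base_time  = 0;
        int64_t cycle_time = 1 * NSEC_PER_MSEC;
        int64_t now        = 5300000;   /* 5.3 ms after base_time */
        int64_t txtime     = 5900000;   /* requested launch time: 5.9 ms */

        /* Same steps as igc_tx_launchtime(), with plain integers. */
        int64_t n = (now - base_time) / cycle_time;        /* cycles elapsed: 5 */
        int64_t cycle_start  = base_time + n * cycle_time; /* 5.0 ms */
        int64_t end_of_cycle = cycle_start + cycle_time;   /* 6.0 ms */

        int64_t launchtime = txtime - cycle_start;         /* offset into cycle */
        if (launchtime > 0)
            launchtime %= cycle_time;
        else
            launchtime = 0;

        printf("cycle_start=%lld ns  end_of_cycle=%lld ns  launchtime=%lld ns\n",
               (long long)cycle_start, (long long)end_of_cycle,
               (long long)launchtime);
        return 0;
    }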
+ */ + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first, launch_time, first_flag); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + 
else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = ring_uses_build_skb(ring) ? + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. 
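On systems with pages smaller than 8 KiB, igc_rx_buffer_flip() above toggles page_offset between the two halves of the page by XOR-ing it with the truesize, so the two halves are handed to the hardware alternately while the other half may still be in flight. A standalone sketch assuming 4 KiB pages:

    /* build: cc -o page_flip page_flip.c */
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_size = 4096;          /* assumed 4 KiB pages */
        unsigned int truesize  = page_size / 2; /* each Rx buffer uses half a page */
        unsigned int page_offset = 0;

        /* XOR with half the page size toggles between the two halves,
         * as in igc_rx_buffer_flip() when PAGE_SIZE < 8192. */
        for (int use = 0; use < 4; use++) {
            printf("use %d: buffer occupies offset %u..%u\n",
                   use, page_offset, page_offset + truesize - 1);
            page_offset ^= truesize;
        }
        return 0;
    }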
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
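igc_alloc_rx_buffers() below biases the refill index by subtracting the ring count up front, so the wrap-around point can be detected with a simple zero test instead of comparing against the count in every iteration; the bias is removed again after the loop. A standalone sketch of that index arithmetic with a tiny example ring:

    /* build: cc -o refill_idx refill_idx.c */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t count = 8;          /* tiny example ring */
        uint16_t next_to_use = 6;    /* start refilling here */
        uint16_t to_fill = 5;        /* refill 5 descriptors, wrapping past the end */

        /* Bias the index by -count (uint16_t arithmetic wraps), as in
         * igc_alloc_rx_buffers(): the wrap point then shows up as i == 0. */
        uint16_t i = next_to_use;
        i -= count;

        while (to_fill--) {
            printf("filling descriptor %u\n", (unsigned)(uint16_t)(i + count));
            i++;
            if (!i)              /* real index just wrapped from count-1 to 0 */
                i -= count;
        }

        i += count;              /* back to a real ring index */
        printf("new next_to_use = %u\n", (unsigned)i);
        return 0;
    }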
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 count, index = ring->next_to_use; + struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; + struct igc_tx_buffer *buffer = head; + union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); + u32 olinfo_status, len = xdpf->len, cmd_type; + void *data = xdpf->data; + u16 i; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igc_maybe_stop_tx(ring, count + 3)) { + /* this is a hard error */ + return -EBUSY; + } + + i = 0; + head->bytecount = xdp_get_frame_len(xdpf); + head->type = IGC_TX_BUFFER_TYPE_XDP; + head->gso_segs = 1; + head->xdpf = xdpf; + + olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, + "Failed to map DMA for TX\n"); + goto unmap; + } + + dma_unmap_len_set(buffer, len, len); + dma_unmap_addr_set(buffer, dma, dma); + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | len; + + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.buffer_addr = cpu_to_le64(dma); + + buffer->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + buffer = &ring->tx_buffer_info[index]; + desc = IGC_TX_DESC(ring, index); + desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(ring), head->bytecount); + /* set the timestamp */ + head->time_stamp = jiffies; + /* set next_to_watch value indicating a packet is present */ + head->next_to_watch = desc; + ring->next_to_use = index; + + return 0; + +unmap: + for (;;) { + buffer = &ring->tx_buffer_info[index]; + if (dma_unmap_len(buffer, len)) + dma_unmap_page(ring->dev, + dma_unmap_addr(buffer, dma), + dma_unmap_len(buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(buffer, len, 0); + if (buffer == head) + break; + + if (!index) + index += ring->count; + index--; + } + + return -ENOMEM; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. 
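Both igc_xmit_frame_ring() and igc_xdp_init_tx_descriptor() above size a send as one descriptor per TXD_USE_COUNT() of each buffer and then reserve extra descriptors (context descriptor plus a gap so the tail never touches the head) before committing, stopping the queue if the ring cannot take that many. A standalone sketch of that budget arithmetic, assuming TXD_USE_COUNT() is a round-up division by the per-descriptor data limit and using an example limit of 16 KiB (neither the macro body nor the limit appears in this hunk):

    /* build: cc -o txd_budget txd_budget.c */
    #include <stdio.h>

    #define MAX_DATA_PER_TXD 16384u   /* example per-descriptor data limit */
    /* round-up division, the assumed shape of TXD_USE_COUNT() */
    #define TXD_USE_COUNT(s) (((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

    int main(void)
    {
        unsigned int headlen = 1400;              /* linear part of the skb */
        unsigned int frags[] = { 32768, 9000 };   /* example paged fragments */
        unsigned int count = TXD_USE_COUNT(headlen);

        for (unsigned int f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
            count += TXD_USE_COUNT(frags[f]);

        /* igc_xmit_frame_ring() reserves count + 5 and the XDP path count + 3;
         * the extra covers the context descriptor and the head/tail gap. */
        printf("data descriptors: %u, reserved with slack: %u\n",
               count, count + 5);
        return 0;
    }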
*/ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + ktime_t timestamp = 0; + struct xdp_buff xdp; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, + size, true); + xdp_buff_clear_frags_flag(&xdp); + + skb = igc_xdp_run_prog(adapter, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } else if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, &xdp); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, skb); +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + 
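/*
 * A rough, standalone sketch (illustrative only, not part of this patch) of how
 * the per-packet verdicts returned by __igc_xdp_run_prog() above are typically
 * OR-ed into one xdp_status word, so igc_finalize_xdp() can bump the TX tail and
 * call xdp_do_flush() once per NAPI poll instead of once per frame.  The action
 * and status bit values below are placeholders, not the driver's definitions.
 */
#include <stdio.h>

enum { XDP_PASS_SKETCH, XDP_TX_SKETCH, XDP_REDIRECT_SKETCH, XDP_DROP_SKETCH };

#define SKETCH_XDP_PASS     0x1  /* hand the frame to the network stack */
#define SKETCH_XDP_CONSUMED 0x2  /* frame dropped or recycled */
#define SKETCH_XDP_TX       0x4  /* queued on a TX ring, needs a tail bump */
#define SKETCH_XDP_REDIRECT 0x8  /* redirected, needs xdp_do_flush() */

static int run_prog_sketch(int act)
{
	switch (act) {
	case XDP_PASS_SKETCH:     return SKETCH_XDP_PASS;
	case XDP_TX_SKETCH:       return SKETCH_XDP_TX;
	case XDP_REDIRECT_SKETCH: return SKETCH_XDP_REDIRECT;
	default:                  return SKETCH_XDP_CONSUMED;
	}
}

int main(void)
{
	int verdicts[] = { XDP_PASS_SKETCH, XDP_TX_SKETCH, XDP_TX_SKETCH,
			   XDP_REDIRECT_SKETCH, XDP_DROP_SKETCH };
	int i, status = 0;

	for (i = 0; i < (int)(sizeof(verdicts) / sizeof(verdicts[0])); i++)
		status |= run_prog_sketch(verdicts[i]);

	/* one flush decision per poll, mirroring igc_finalize_xdp() */
	if (status & SKETCH_XDP_TX)
		printf("bump TX tail once\n");
	if (status & SKETCH_XDP_REDIRECT)
		printf("xdp_do_flush() once\n");
	return 0;
}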
+ while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. + */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? 
budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + u16 ntu = ring->next_to_use; + struct xdp_desc xdp_desc; + u16 budget; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring *tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case 
IGC_TX_BUFFER_TYPE_SKB: + napi_consume_skb(tx_buffer->skb, napi_budget); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != + readl(tx_ring->tail))) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. + */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? 
"dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. 
+ */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 
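/*
 * A rough, standalone sketch (illustrative only, not part of this patch) of the
 * row layout used by igc_write_flex_filter_ll() above: the 128-byte pattern and
 * 128-bit mask are written as 16 rows of "dw0 | dw1 | one mask byte", with each
 * dword packed little-endian from 8 consecutive pattern bytes.  This program
 * only reproduces the packing arithmetic; it touches no hardware registers, and
 * the demo pattern below is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char dst[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };
	uint8_t data[128], mask[16];
	int i;

	memset(data, 0, sizeof(data));
	memset(mask, 0, sizeof(mask));
	memcpy(data, dst, sizeof(dst));	/* match the first six bytes */
	mask[0] = 0x3f;			/* one mask bit per pattern byte */

	for (i = 0; i < 16; i++) {
		const size_t d = (size_t)i * 8;
		uint32_t dw0 = data[d + 0] | data[d + 1] << 8 |
			       data[d + 2] << 16 | (uint32_t)data[d + 3] << 24;
		uint32_t dw1 = data[d + 4] | data[d + 5] << 8 |
			       data[d + 6] << 16 | (uint32_t)data[d + 7] << 24;

		/* the driver read-modify-writes only bits 7:0 of the mask word */
		printf("row %2d: dw0=%08x dw1=%08x mask=%02x\n",
		       i, (unsigned int)dw0, (unsigned int)dw1, mask[i]);
	}
	return 0;
}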
18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. */ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + int err; + + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & 
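/*
 * A rough, standalone sketch (illustrative only, not part of this patch) of the
 * byte offsets used when igc_add_flex_filter() above builds its match pattern,
 * and why the rule's 26 bytes of header data are programmed as length 32: the
 * hardware only accepts lengths that are a multiple of 8.
 */
#include <stdio.h>

static unsigned int round_up8(unsigned int len)
{
	return (len + 7) & ~7u;
}

int main(void)
{
	printf("untagged:    dst@0 src@6 ethertype@12 user-data@14\n");
	printf("VLAN-tagged: dst@0 src@6 tpid@12 tci@14 ethertype@16 user-data@18\n");

	/* 6 + 6 + 2 + 2 + 2 + 8 = 26 bytes of match data -> length 32 */
	printf("pattern length: %u -> %u\n", 26, round_up8(26));
	return 0;
}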
IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. + */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
+ */
+static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
+			   int index, int offset)
+{
+	u32 ivar = array_rd32(IGC_IVAR0, index);
+
+	/* clear any bits that are currently set */
+	ivar &= ~((u32)0xFF << offset);
+
+	/* write vector and valid bit */
+	ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
+
+	array_wr32(IGC_IVAR0, index, ivar);
+}
+
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
+{
+	struct igc_adapter *adapter = q_vector->adapter;
+	struct igc_hw *hw = &adapter->hw;
+	int rx_queue = IGC_N0_QUEUE;
+	int tx_queue = IGC_N0_QUEUE;
+
+	if (q_vector->rx.ring)
+		rx_queue = q_vector->rx.ring->reg_idx;
+	if (q_vector->tx.ring)
+		tx_queue = q_vector->tx.ring->reg_idx;
+
+	switch (hw->mac.type) {
+	case igc_i225:
+		if (rx_queue > IGC_N0_QUEUE)
+			igc_write_ivar(hw, msix_vector,
+				       rx_queue >> 1,
+				       (rx_queue & 0x1) << 4);
+		if (tx_queue > IGC_N0_QUEUE)
+			igc_write_ivar(hw, msix_vector,
+				       tx_queue >> 1,
+				       ((tx_queue & 0x1) << 4) + 8);
+		q_vector->eims_value = BIT(msix_vector);
+		break;
+	default:
+		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
+		break;
+	}
+
+	/* add q_vector eims value to global eims_enable_mask */
+	adapter->eims_enable_mask |= q_vector->eims_value;
+
+	/* configure q_vector to set itr on first interrupt */
+	q_vector->set_itr = 1;
+}
+
+/**
+ * igc_configure_msix - Configure MSI-X hardware
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ */
+static void igc_configure_msix(struct igc_adapter *adapter)
+{
+	struct igc_hw *hw = &adapter->hw;
+	int i, vector = 0;
+	u32 tmp;
+
+	adapter->eims_enable_mask = 0;
+
+	/* set vector for other causes, i.e. link changes */
+	switch (hw->mac.type) {
+	case igc_i225:
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick. And it will take days to debug.
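/*
 * A rough, standalone sketch (illustrative only, not part of this patch) of the
 * IVAR indexing done by igc_assign_vector()/igc_write_ivar() above: each 32-bit
 * IVAR entry holds four 8-bit cause fields, RX of queue n lives in entry n>>1 at
 * bit offset (n & 1) * 16, TX at that offset plus 8.  SKETCH_IVAR_VALID is an
 * assumed placeholder for the hardware's valid bit, not a value taken from the
 * driver.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_IVAR_VALID 0x80u	/* assumption: "field valid" flag */

/* clear the 8-bit field at 'offset' and write vector | valid, as the driver does */
static uint32_t ivar_set(uint32_t ivar, unsigned int vector, unsigned int offset)
{
	ivar &= ~(0xFFu << offset);
	ivar |= (vector | SKETCH_IVAR_VALID) << offset;
	return ivar;
}

int main(void)
{
	uint32_t ivar[2] = { 0, 0 };	/* pretend IVAR0..IVAR1 */
	unsigned int q, vector = 1;

	for (q = 0; q < 4; q++, vector++) {
		unsigned int index = q >> 1;
		unsigned int rx_off = (q & 1) << 4;		/* 0 or 16 */
		unsigned int tx_off = ((q & 1) << 4) + 8;	/* 8 or 24 */

		ivar[index] = ivar_set(ivar[index], vector, rx_off);
		ivar[index] = ivar_set(ivar[index], vector, tx_off);
	}

	for (q = 0; q < 2; q++)
		printf("IVAR[%u] = 0x%08x\n", q, (unsigned int)ivar[q]);
	return 0;
}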
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
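/*
 * A rough, standalone sketch (illustrative only, not part of this patch) of the
 * queue-pairing decision made by igc_set_flag_queue_pairs() above: once more
 * than half of the maximum RSS queues are in use, RX and TX of a queue share a
 * single MSI-X vector to stay within the vector budget.  The maximum of 4 RSS
 * queues is assumed here for the demo output.
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_queue_pairs(unsigned int rss_queues, unsigned int max_rss_queues)
{
	return rss_queues > max_rss_queues / 2;
}

int main(void)
{
	const unsigned int max_rss_queues = 4;	/* assumed adapter maximum */
	unsigned int online_cpus;

	for (online_cpus = 1; online_cpus <= 8; online_cpus++) {
		unsigned int rss = online_cpus < max_rss_queues ?
				   online_cpus : max_rss_queues;

		printf("%u CPUs -> %u RSS queues, queue pairs: %s\n",
		       online_cpus, rss,
		       use_queue_pairs(rss, max_rss_queues) ? "yes" : "no");
	}
	return 0;
}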
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
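/*
 * A rough, standalone sketch (illustrative only, not part of this patch) of the
 * interrupt-moderation state machine in igc_update_itr() above, reduced to a
 * pure function so its packet/byte thresholds can be exercised on sample
 * workloads.  Per the driver's comments, the three latency classes correspond
 * to roughly 70000, 20000 and 4000 interrupts per second.
 */
#include <stdio.h>

enum { LOWEST_LATENCY, LOW_LATENCY, BULK_LATENCY };

static int next_latency(int itr, unsigned int packets, unsigned int bytes)
{
	if (!packets)
		return itr;	/* no traffic: leave the class unchanged */

	switch (itr) {
	case LOWEST_LATENCY:
		if (bytes / packets > 8000)
			itr = BULK_LATENCY;
		else if (packets < 5 && bytes > 512)
			itr = LOW_LATENCY;
		break;
	case LOW_LATENCY:
		if (bytes > 10000) {
			if (bytes / packets > 8000)
				itr = BULK_LATENCY;
			else if (packets < 10 || bytes / packets > 1200)
				itr = BULK_LATENCY;
			else if (packets > 35)
				itr = LOWEST_LATENCY;
		} else if (bytes / packets > 2000) {
			itr = BULK_LATENCY;
		} else if (packets <= 2 && bytes < 512) {
			itr = LOWEST_LATENCY;
		}
		break;
	case BULK_LATENCY:
		if (bytes > 25000) {
			if (packets > 35)
				itr = LOW_LATENCY;
		} else if (bytes < 1500) {
			itr = LOW_LATENCY;
		}
		break;
	}
	return itr;
}

int main(void)
{
	static const char *name[] = { "lowest", "low", "bulk" };

	/* a few small frames: stay latency-sensitive */
	printf("from low:  2 pkts / 400 B   -> %s\n",
	       name[next_latency(LOW_LATENCY, 2, 400)]);
	/* large TSO-like frames: back off to bulk moderation */
	printf("from low:  4 pkts / 36000 B -> %s\n",
	       name[next_latency(LOW_LATENCY, 4, 36000)]);
	return 0;
}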
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
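/*
 * A rough, standalone sketch (illustrative only, not part of this patch) of the
 * packet-size heuristic in igc_update_ring_itr() above: the average wire size
 * (payload plus 24 bytes for CRC, preamble and gap) is clamped to 3000 and then
 * divided down to an ITR value, with mid-size frames getting a slightly smaller
 * (more aggressive) interval.  Results are in the ITR register's units.
 */
#include <stdio.h>

static unsigned int ring_itr_from_avg(unsigned int total_bytes,
				      unsigned int total_packets)
{
	unsigned int avg_wire_size;

	if (!total_packets)
		return 0;	/* no work done: leave the ITR unchanged */

	avg_wire_size = total_bytes / total_packets + 24;
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;	/* don't starve jumbo frames */

	if (avg_wire_size > 300 && avg_wire_size < 1200)
		return avg_wire_size / 3;	/* boost mid-size frames */
	return avg_wire_size / 2;
}

int main(void)
{
	printf("64-byte frames   -> ITR %u\n", ring_itr_from_avg(64 * 100, 100));
	printf("512-byte frames  -> ITR %u\n", ring_itr_from_avg(512 * 100, 100));
	printf("1500-byte frames -> ITR %u\n", ring_itr_from_avg(1500 * 100, 100));
	return 0;
}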
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * 
igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast 
= adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + igc_regs_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igc_reinit_locked(adapter); + 
rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = 
sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
+ */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + link = igc_has_link(adapter); + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 1; + break; + } + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGC_FLAG_MAS_ENABLE) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGC_FLAG_MAS_ENABLE)) { + if (adapter->flags & IGC_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + netif_tx_start_all_queues(netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. + */ + if (!is_base_time_past(qopt->base_time, &now)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e, *prev; + int i; + + prev = n ? &qopt->entries[n - 1] : NULL; + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. + */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { + queue_uses[i]++; + + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + } + + return 0; +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + u32 start_time = 0, end_time = 0; + size_t n; + int i; + + adapter->qbv_enable = qopt->enable; + + if (!qopt->enable) + return igc_tsn_clear_schedule(adapter); + + if (qopt->base_time < 0) + return -ERANGE; + + if (adapter->base_time) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + + end_time += e->interval; + + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; + ring->end_time = end_time; + + queue_configured[i] = true; + } + + start_time += e->interval; + } + + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + if (!queue_configured[i]) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. 
+ * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. + */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct igc_ring *ring; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igc_xdp_is_enabled(adapter)) + return -ENXIO; + + if (queue_id >= 
adapter->num_rx_queues) + return -EINVAL; + + ring = adapter->rx_ring[queue_id]; + + if (!ring->xsk_pool) + return -ENXIO; + + q_vector = adapter->q_vector[queue_id]; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + igc_trigger_rxtxq_interrupt(adapter, q_vector); + + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_rx_mode = igc_set_rx_mode, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_tx_timeout = igc_tx_timeout, + .ndo_get_stats64 = igc_get_stats64, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, + .ndo_eth_ioctl = igc_ioctl, + .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, + .ndo_xsk_wakeup = igc_xsk_wakeup, +}; + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_read_word(adapter->pdev, reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_write_word(adapter->pdev, reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (IGC_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. 
+ */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + 
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + igc_tsn_clear_schedule(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + pci_disable_ptm(pdev); + pci_clear_master(pdev); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + unregister_netdev(netdev); + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; + struct igc_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igc_close(netdev, true); + + igc_ptp_suspend(adapter); + + igc_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_LU) + wufc &= ~IGC_WUFC_LNKC; + + if (wufc) { + igc_setup_rctl(adapter); + igc_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IGC_WUFC_MC) { + rctl = rd32(IGC_RCTL); + rctl |= IGC_RCTL_MPE; + wr32(IGC_RCTL, rctl); + } + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_ADVD3WUC; + wr32(IGC_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igc_disable_pcie_master(hw); + + wr32(IGC_WUC, IGC_WUC_PME_EN); + wr32(IGC_WUFC, wufc); + } else { + wr32(IGC_WUC, 0); + wr32(IGC_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igc_power_down_phy_copper_base(&adapter->hw); + else + igc_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int __maybe_unused igc_runtime_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 1); +} + +static void igc_deliver_wake_packet(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. 
+ */ + if (wupl == 0 || wupl > IGC_WUPM_BYTES) + return; + + skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igc_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. 
+ **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. + */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + 
igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_main-6.4-ethercat.c b/devices/igc/igc_main-6.4-ethercat.c index ef1e34fe..67001f28 100644 --- a/devices/igc/igc_main-6.4-ethercat.c +++ b/devices/igc/igc_main-6.4-ethercat.c @@ -110,7 +110,7 @@ void igc_reset(struct igc_adapter *adapter) /* Re-establish EEE setting */ igc_set_eee_i225(hw, true, true, true); - if (!adapter->ecdev && !netif_running(adapter->netdev)) + if (!netif_running(adapter->netdev)) igc_power_down_phy_copper_base(&adapter->hw); /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ @@ -211,7 +211,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) case IGC_TX_BUFFER_TYPE_SKB: { struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* skb is reused in EtherCAT TX operation */ dev_kfree_skb_any(tx_buffer->skb); } @@ -1202,7 +1202,7 @@ static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) { struct net_device *netdev = tx_ring->netdev; struct igc_adapter *adapter = netdev_priv(netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_stop_subqueue(netdev, tx_ring->queue_index); } @@ -1216,7 +1216,7 @@ static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) return -EBUSY; /* A reprieve! 
*/ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_wake_subqueue(netdev, tx_ring->queue_index); } @@ -1414,7 +1414,7 @@ dma_error: if (dma_unmap_len(tx_buffer, len)) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; } @@ -1593,7 +1593,7 @@ done: goto out_drop; } - if (unlikely(!adapter->ecdev && + if (unlikely(!get_ecdev(adapter) && test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { /* FIXME: add support for retrieving timestamps from @@ -1636,7 +1636,7 @@ done: return NETDEV_TX_OK; out_drop: - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(first->skb); first->skb = NULL; } @@ -2605,11 +2605,11 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) total_packets++; total_bytes += size; - } - else if (adapter->ecdev) { + } + else if (get_ecdev(adapter)) { unsigned char *va = page_address(rx_buffer->page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); - ecdev_receive(adapter->ecdev, va, size); + ecdev_receive(get_ecdev(adapter), va, size); adapter->ec_watchdog_jiffies = jiffies; igc_reuse_rx_page(rx_ring, rx_buffer); } @@ -2638,11 +2638,11 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) continue; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { total_packets++; continue; } - + /* verify the packet layout is correct */ if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; @@ -2964,7 +2964,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); break; case IGC_TX_BUFFER_TYPE_SKB: - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_consume_skb(tx_buffer->skb, napi_budget); } igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); @@ -3007,7 +3007,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) budget--; } while (likely(budget)); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); } @@ -3025,7 +3025,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) igc_xdp_xmit_zc(tx_ring); } - if (!adapter->ecdev && + if (!get_ecdev(adapter) && test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct igc_hw *hw = &adapter->hw; @@ -3070,7 +3070,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) } #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(!adapter->ecdev && total_packets && + if (unlikely(!get_ecdev(adapter) && total_packets && netif_carrier_ok(tx_ring->netdev) && igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this @@ -4040,7 +4040,7 @@ static void igc_configure_msix(struct igc_adapter *adapter) */ static void igc_irq_enable(struct igc_adapter *adapter) { - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* skip enabling interrupts */ return; } @@ -4083,7 +4083,7 @@ static void igc_irq_disable(struct igc_adapter *adapter) wr32(IGC_IMC, ~0); wrfl(); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* skip synchonizing IRQs */ return; } @@ -4152,9 +4152,7 @@ static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) { - netif_napi_del(&q_vector->napi); - } + 
netif_napi_del(&q_vector->napi); } /** @@ -4556,6 +4554,9 @@ static int igc_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (get_ecdev(q_vector->adapter)) + return -EBUSY; + if (q_vector->tx.ring) clean_complete = igc_clean_tx_irq(q_vector, budget); @@ -4619,10 +4620,8 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter, if (!q_vector) return -ENOMEM; - if (!adapter->ecdev) { - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); - } + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -4850,7 +4849,7 @@ void igc_up(struct igc_adapter *adapter) igc_configure(adapter); clear_bit(__IGC_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&adapter->q_vector[i]->napi); } @@ -4863,7 +4862,7 @@ void igc_up(struct igc_adapter *adapter) rd32(IGC_ICR); igc_irq_enable(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. */ @@ -5062,7 +5061,7 @@ void igc_down(struct igc_adapter *adapter) } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -5082,7 +5081,7 @@ void igc_down(struct igc_adapter *adapter) adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - if (!adapter->ecdev && adapter->q_vector[i]) { + if (!get_ecdev(adapter) && adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); napi_disable(&adapter->q_vector[i]->napi); } @@ -5441,7 +5440,7 @@ static int igc_request_msix(struct igc_adapter *adapter) unsigned int num_q_vectors = adapter->num_q_vectors; int i = 0, err = 0, vector = 0, free_vector = 0; struct net_device *netdev = adapter->netdev; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* avoid requesting MSI-X. 
*/ return 0; } @@ -5576,13 +5575,13 @@ static void igc_watchdog_task(struct work_struct *work) u32 link; int i; - if (adapter->ecdev) + if (get_ecdev(adapter)) hw->mac.get_link_status = true; link = igc_has_link(adapter); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, link); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), link); return; } @@ -5829,7 +5828,7 @@ static irqreturn_t igc_intr(int irq, void *data) static void igc_free_irq(struct igc_adapter *adapter) { - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* no IRQ to free in EtherCAT operation */ return; } @@ -5879,7 +5878,7 @@ static int igc_request_irq(struct igc_adapter *adapter) igc_assign_vector(adapter->q_vector[0], 0); - if (!adapter->ecdev && adapter->flags & IGC_FLAG_HAS_MSI) { + if (!get_ecdev(adapter) && adapter->flags & IGC_FLAG_HAS_MSI) { err = request_irq(pdev->irq, &igc_intr_msi, 0, netdev->name, adapter); if (!err) @@ -5890,7 +5889,7 @@ static int igc_request_irq(struct igc_adapter *adapter) adapter->flags &= ~IGC_FLAG_HAS_MSI; } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, netdev->name, adapter); @@ -5932,8 +5931,8 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (!resuming) pm_runtime_get_sync(&pdev->dev); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -5957,7 +5956,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) @@ -5969,7 +5968,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) } clear_bit(__IGC_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&adapter->q_vector[i]->napi); } @@ -5980,12 +5979,12 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (!resuming) pm_runtime_put(&pdev->dev); - - if (!adapter->ecdev) { + + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(netdev); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* start the watchdog. 
*/ hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); @@ -6553,7 +6552,7 @@ static void ec_kick_watchdog(struct irq_work *work) schedule_work(&adapter->watchdog_task); } -/** +/** * ec_poll - EtherCAT poll routine * @netdev: net device structure * @@ -6583,8 +6582,6 @@ void ec_poll(struct net_device *netdev) } } -/** - /* PCIe configuration access */ void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) { @@ -6919,12 +6916,13 @@ static int igc_probe(struct pci_dev *pdev, */ igc_get_hw_control(adapter); - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = true; + if (get_ecdev(adapter)) { init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } adapter->ec_watchdog_jiffies = jiffies; @@ -6986,10 +6984,10 @@ static void igc_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igc_adapter *adapter = netdev_priv(netdev); - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); - irq_work_sync(&adapter->irq_work); - ecdev_withdraw(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + irq_work_sync(&adapter->ec_watchdog_kicker); + ecdev_withdraw(get_ecdev(adapter)); } pm_runtime_get_noresume(&pdev->dev); @@ -7014,7 +7012,7 @@ static void igc_remove(struct pci_dev *pdev) * would have already happened in close and is redundant. */ igc_release_hw_control(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { unregister_netdev(netdev); } @@ -7037,8 +7035,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, u32 ctrl, rctl, status; bool wake; - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); } else { rtnl_lock(); netif_device_detach(netdev); @@ -7178,8 +7176,8 @@ static int __maybe_unused igc_resume(struct device *dev) wr32(IGC_WUS, ~0); - if (adapter->ecdev) { - ecdev_open(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_open(get_ecdev(adapter)); } else { rtnl_lock(); if (!err && netif_running(netdev)) diff --git a/devices/igc/igc_main-6.6-ethercat.c b/devices/igc/igc_main-6.6-ethercat.c index 81e911ad..0a92adb8 100644 --- a/devices/igc/igc_main-6.6-ethercat.c +++ b/devices/igc/igc_main-6.6-ethercat.c @@ -110,7 +110,7 @@ void igc_reset(struct igc_adapter *adapter) /* Re-establish EEE setting */ igc_set_eee_i225(hw, true, true, true); - if (!adapter->ecdev && !netif_running(adapter->netdev)) + if (!netif_running(adapter->netdev)) igc_power_down_phy_copper_base(&adapter->hw); /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ @@ -211,7 +211,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) case IGC_TX_BUFFER_TYPE_SKB: { struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* skb is reused in EtherCAT TX operation */ dev_kfree_skb_any(tx_buffer->skb); } @@ -1229,7 +1229,7 @@ static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) { struct net_device *netdev = tx_ring->netdev; struct igc_adapter *adapter = netdev_priv(netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_stop_subqueue(netdev, tx_ring->queue_index); } @@ -1243,7 +1243,7 @@ static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const 
u16 size) return -EBUSY; /* A reprieve! */ - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_wake_subqueue(netdev, tx_ring->queue_index); } @@ -1452,7 +1452,7 @@ dma_error: if (dma_unmap_len(tx_buffer, len)) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; } @@ -1651,7 +1651,7 @@ done: goto out_drop; } - if (unlikely(!adapter->ecdev && + if (unlikely(!get_ecdev(adapter) && test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { /* FIXME: add support for retrieving timestamps from @@ -1692,7 +1692,7 @@ done: return NETDEV_TX_OK; out_drop: - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { dev_kfree_skb_any(first->skb); first->skb = NULL; } @@ -2664,11 +2664,11 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) total_packets++; total_bytes += size; - } - else if (adapter->ecdev) { + } + else if (get_ecdev(adapter)) { unsigned char *va = page_address(rx_buffer->page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); - ecdev_receive(adapter->ecdev, va, size); + ecdev_receive(get_ecdev(adapter), va, size); adapter->ec_watchdog_jiffies = jiffies; igc_reuse_rx_page(rx_ring, rx_buffer); } @@ -2697,11 +2697,11 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) continue; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { total_packets++; continue; } - + /* verify the packet layout is correct */ if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { skb = NULL; @@ -3027,7 +3027,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); break; case IGC_TX_BUFFER_TYPE_SKB: - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { napi_consume_skb(tx_buffer->skb, napi_budget); } igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); @@ -3070,7 +3070,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) budget--; } while (likely(budget)); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); } @@ -3088,7 +3088,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) igc_xdp_xmit_zc(tx_ring); } - if (!adapter->ecdev && + if (!get_ecdev(adapter) && test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct igc_hw *hw = &adapter->hw; @@ -3133,7 +3133,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) } #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(!adapter->ecdev && total_packets && + if (unlikely(!get_ecdev(adapter) && total_packets && netif_carrier_ok(tx_ring->netdev) && igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this @@ -4103,7 +4103,7 @@ static void igc_configure_msix(struct igc_adapter *adapter) */ static void igc_irq_enable(struct igc_adapter *adapter) { - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* skip enabling interrupts */ return; } @@ -4146,7 +4146,7 @@ static void igc_irq_disable(struct igc_adapter *adapter) wr32(IGC_IMC, ~0); wrfl(); - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* skip synchonizing IRQs */ return; } @@ -4215,9 +4215,7 @@ static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) if (q_vector->rx.ring) adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; - if (!adapter->ecdev) { - 
netif_napi_del(&q_vector->napi); - } + netif_napi_del(&q_vector->napi); } /** @@ -4619,6 +4617,9 @@ static int igc_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int work_done = 0; + if (get_ecdev(q_vector->adapter)) + return -EBUSY; + if (q_vector->tx.ring) clean_complete = igc_clean_tx_irq(q_vector, budget); @@ -4682,10 +4683,8 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter, if (!q_vector) return -ENOMEM; - if (!adapter->ecdev) { - /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); - } + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -4914,7 +4913,7 @@ void igc_up(struct igc_adapter *adapter) igc_configure(adapter); clear_bit(__IGC_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&adapter->q_vector[i]->napi); } @@ -4927,7 +4926,7 @@ void igc_up(struct igc_adapter *adapter) rd32(IGC_ICR); igc_irq_enable(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(adapter->netdev); /* start the watchdog. */ @@ -5126,7 +5125,7 @@ void igc_down(struct igc_adapter *adapter) } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } @@ -5146,7 +5145,7 @@ void igc_down(struct igc_adapter *adapter) adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - if (!adapter->ecdev && adapter->q_vector[i]) { + if (!get_ecdev(adapter) && adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); napi_disable(&adapter->q_vector[i]->napi); } @@ -5506,7 +5505,7 @@ static int igc_request_msix(struct igc_adapter *adapter) unsigned int num_q_vectors = adapter->num_q_vectors; int i = 0, err = 0, vector = 0, free_vector = 0; struct net_device *netdev = adapter->netdev; - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* avoid requesting MSI-X. 
*/ return 0; } @@ -5641,13 +5640,13 @@ static void igc_watchdog_task(struct work_struct *work) u32 link; int i; - if (adapter->ecdev) + if (get_ecdev(adapter)) hw->mac.get_link_status = true; link = igc_has_link(adapter); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, link); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), link); return; } @@ -5894,7 +5893,7 @@ static irqreturn_t igc_intr(int irq, void *data) static void igc_free_irq(struct igc_adapter *adapter) { - if (adapter->ecdev) { + if (get_ecdev(adapter)) { /* no IRQ to free in EtherCAT operation */ return; } @@ -5944,7 +5943,7 @@ static int igc_request_irq(struct igc_adapter *adapter) igc_assign_vector(adapter->q_vector[0], 0); - if (!adapter->ecdev && adapter->flags & IGC_FLAG_HAS_MSI) { + if (!get_ecdev(adapter) && adapter->flags & IGC_FLAG_HAS_MSI) { err = request_irq(pdev->irq, &igc_intr_msi, 0, netdev->name, adapter); if (!err) @@ -5955,7 +5954,7 @@ static int igc_request_irq(struct igc_adapter *adapter) adapter->flags &= ~IGC_FLAG_HAS_MSI; } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, netdev->name, adapter); @@ -5997,8 +5996,8 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (!resuming) pm_runtime_get_sync(&pdev->dev); - if (adapter->ecdev) { - ecdev_set_link(adapter->ecdev, 0); + if (get_ecdev(adapter)) { + ecdev_set_link(get_ecdev(adapter), 0); } else { netif_carrier_off(netdev); @@ -6022,7 +6021,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) @@ -6034,7 +6033,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) } clear_bit(__IGC_DOWN, &adapter->state); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&adapter->q_vector[i]->napi); } @@ -6045,12 +6044,12 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (!resuming) pm_runtime_put(&pdev->dev); - - if (!adapter->ecdev) { + + if (!get_ecdev(adapter)) { netif_tx_start_all_queues(netdev); } - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { /* start the watchdog. 
*/ hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); @@ -6673,7 +6672,7 @@ static void ec_kick_watchdog(struct irq_work *work) schedule_work(&adapter->watchdog_task); } -/** +/** * ec_poll - EtherCAT poll routine * @netdev: net device structure * @@ -6703,8 +6702,6 @@ void ec_poll(struct net_device *netdev) } } -/** - /* PCIe configuration access */ void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) { @@ -7059,12 +7056,13 @@ static int igc_probe(struct pci_dev *pdev, */ igc_get_hw_control(adapter); - adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); - if (adapter->ecdev) { + adapter->ecdev_ = ecdev_offer(netdev, ec_poll, THIS_MODULE); + adapter->ecdev_initialized = true; + if (get_ecdev(adapter)) { init_irq_work(&adapter->ec_watchdog_kicker, ec_kick_watchdog); - err = ecdev_open(adapter->ecdev); + err = ecdev_open(get_ecdev(adapter)); if (err) { - ecdev_withdraw(adapter->ecdev); + ecdev_withdraw(get_ecdev(adapter)); goto err_register; } adapter->ec_watchdog_jiffies = jiffies; @@ -7126,10 +7124,10 @@ static void igc_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igc_adapter *adapter = netdev_priv(netdev); - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); - irq_work_sync(&adapter->irq_work); - ecdev_withdraw(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); + irq_work_sync(&adapter->ec_watchdog_kicker); + ecdev_withdraw(get_ecdev(adapter)); } pm_runtime_get_noresume(&pdev->dev); @@ -7154,7 +7152,7 @@ static void igc_remove(struct pci_dev *pdev) * would have already happened in close and is redundant. */ igc_release_hw_control(adapter); - if (!adapter->ecdev) { + if (!get_ecdev(adapter)) { unregister_netdev(netdev); } @@ -7177,8 +7175,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, u32 ctrl, rctl, status; bool wake; - if (adapter->ecdev) { - ecdev_close(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_close(get_ecdev(adapter)); } else { rtnl_lock(); netif_device_detach(netdev); @@ -7318,8 +7316,8 @@ static int __maybe_unused igc_resume(struct device *dev) wr32(IGC_WUS, ~0); - if (adapter->ecdev) { - ecdev_open(adapter->ecdev); + if (get_ecdev(adapter)) { + ecdev_open(get_ecdev(adapter)); } else { rtnl_lock(); if (!err && netif_running(netdev)) diff --git a/devices/igc/igc_nvm-5.14-ethercat.c b/devices/igc/igc_nvm-5.14-ethercat.c new file mode 100644 index 00000000..b6b5a75f --- /dev/null +++ b/devices/igc/igc_nvm-5.14-ethercat.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac-5.14-ethercat.h" +#include "igc_nvm-5.14-ethercat.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. 
+ * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-5.14-ethercat.h b/devices/igc/igc_nvm-5.14-ethercat.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-5.14-ethercat.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_nvm-5.14-orig.c b/devices/igc/igc_nvm-5.14-orig.c new file mode 100644 index 00000000..58f81aba --- /dev/null +++ b/devices/igc/igc_nvm-5.14-orig.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac.h" +#include "igc_nvm.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-5.14-orig.h b/devices/igc/igc_nvm-5.14-orig.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-5.14-orig.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_nvm-6.1-ethercat.c b/devices/igc/igc_nvm-6.1-ethercat.c new file mode 100644 index 00000000..f8aa0443 --- /dev/null +++ b/devices/igc/igc_nvm-6.1-ethercat.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac-6.1-ethercat.h" +#include "igc_nvm-6.1-ethercat.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
+ */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-6.1-ethercat.h b/devices/igc/igc_nvm-6.1-ethercat.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-6.1-ethercat.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_nvm-6.1-orig.c b/devices/igc/igc_nvm-6.1-orig.c new file mode 100644 index 00000000..58f81aba --- /dev/null +++ b/devices/igc/igc_nvm-6.1-orig.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac.h" +#include "igc_nvm.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-6.1-orig.h b/devices/igc/igc_nvm-6.1-orig.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-6.1-orig.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-5.14-ethercat.c b/devices/igc/igc_phy-5.14-ethercat.c new file mode 100644 index 00000000..b1bbd499 --- /dev/null +++ b/devices/igc/igc_phy-5.14-ethercat.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy-5.14-ethercat.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. 
+ */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + */ +s32 igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + + switch (phy->type) { + case igc_phy_i225: + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + } + + return ret_val; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). 
+ */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). 
+ */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + usleep_range(500, 1000); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
+ */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + usleep_range(500, 1000); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-5.14-ethercat.h b/devices/igc/igc_phy-5.14-ethercat.h new file mode 100644 index 00000000..6d6207c0 --- /dev/null +++ b/devices/igc/igc_phy-5.14-ethercat.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac-5.14-ethercat.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-5.14-orig.c b/devices/igc/igc_phy-5.14-orig.c new file mode 100644 index 00000000..83aeb5e7 --- /dev/null +++ b/devices/igc/igc_phy-5.14-orig.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? 
+ IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
+ */ +s32 igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val; + + switch (phy->type) { + case igc_phy_i225: + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + } + + return ret_val; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). 
+ */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. 
+ */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && + hw->phy.id == I225_I_PHY_ID) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). 
+ */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + usleep_range(500, 1000); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
+ */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + usleep_range(500, 1000); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + if (ret_val) + return ret_val; + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-5.14-orig.h b/devices/igc/igc_phy-5.14-orig.h new file mode 100644 index 00000000..1b031372 --- /dev/null +++ b/devices/igc/igc_phy-5.14-orig.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-6.1-ethercat.c b/devices/igc/igc_phy-6.1-ethercat.c new file mode 100644 index 00000000..360ecb8d --- /dev/null +++ b/devices/igc/igc_phy-6.1-ethercat.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy-6.1-ethercat.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? 
+ IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * A downshift is detected by querying the PHY link health. 
+ */ +void igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + + /* speed downshift not supported */ + phy->speed_downgraded = false; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. 
Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. 
+ */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. 
+ * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-6.1-ethercat.h b/devices/igc/igc_phy-6.1-ethercat.h new file mode 100644 index 00000000..5f5ff349 --- /dev/null +++ b/devices/igc/igc_phy-6.1-ethercat.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac-6.1-ethercat.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-6.1-orig.c b/devices/igc/igc_phy-6.1-orig.c new file mode 100644 index 00000000..53b77c96 --- /dev/null +++ b/devices/igc/igc_phy-6.1-orig.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. 
+ */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * A downshift is detected by querying the PHY link health. + */ +void igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + + /* speed downshift not supported */ + phy->speed_downgraded = false; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. 
Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. 
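
The flow-control switch in igc_phy_setup_autoneg() above boils down to a small mapping from hw->fc.current_mode to the two pause bits in the autoneg advertisement register. As a sketch only (igc_example_fc_pause_bits() is not part of the patch, and the enum igc_fc_mode type name is assumed from the driver's hw definitions):

static u16 igc_example_fc_pause_bits(enum igc_fc_mode mode)
{
	switch (mode) {
	case igc_fc_none:
		return 0;
	case igc_fc_rx_pause:
	case igc_fc_full:
		/* Rx-only pause is advertised as symmetric + asymmetric;
		 * the Tx side is disabled later in
		 * igc_config_fc_after_link_up(). */
		return NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
	case igc_fc_tx_pause:
		return NWAY_AR_ASM_DIR;
	default:
		/* The driver treats this as -IGC_ERR_CONFIG. */
		return 0;
	}
}
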
Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. 
+ */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. 
+ * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-6.1-orig.h b/devices/igc/igc_phy-6.1-orig.h new file mode 100644 index 00000000..832a7e35 --- /dev/null +++ b/devices/igc/igc_phy-6.1-orig.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_ptp-5.14-ethercat.c b/devices/igc/igc_ptp-5.14-ethercat.c new file mode 100644 index 00000000..2b8219d3 --- /dev/null +++ b/devices/igc/igc_ptp-5.14-ethercat.c @@ -0,0 +1,914 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-5.14-ethercat.h" + +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. 
*/ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? 
ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
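
Going back to igc_ptp_adjfine_i225() above: scaled_ppm from the PTP core carries a 16-bit fractional part (1 ppm == 65536), and the function scales it by 2^14 / 78125 to form the TIMINCA increment field. The same arithmetic as a standalone user-space program, constants copied from the function; this is purely a worked example, not driver code.

#include <stdint.h>
#include <stdio.h>

#define INCVALUE_MASK 0x7fffffff
#define ISGN          0x80000000u

static uint32_t example_inca_from_scaled_ppm(long scaled_ppm)
{
	int neg_adj = scaled_ppm < 0;
	uint64_t rate = neg_adj ? (uint64_t)-scaled_ppm : (uint64_t)scaled_ppm;

	rate <<= 14;		/* same scaling as the driver */
	rate /= 78125;

	return (uint32_t)(rate & INCVALUE_MASK) | (neg_adj ? ISGN : 0);
}

int main(void)
{
	/* +1 ppm == scaled_ppm 65536 -> increment field 13743 */
	printf("%u\n", (unsigned int)example_inca_from_scaled_ppm(65536));
	return 0;
}
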
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + /* For now, 
always select timer 0 as source. */ + wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + **/ +static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
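
One way to exercise the PEROUT path of igc_ptp_feature_enable_i225() above from user space is the PTP character device: assign an SDP pin the periodic-output function, then request a period. A minimal sketch; the /dev/ptp0 path and pin index 0 are assumptions, and a 1 s period is chosen because the logic above halves it to 500 ms and takes the free-running clock-output (FREQOUT) branch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin;
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Route periodic-output channel 0 to pin index 0. */
	memset(&pin, 0, sizeof(pin));
	pin.index = 0;
	pin.func = PTP_PF_PEROUT;
	pin.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin))
		perror("PTP_PIN_SETFUNC");

	/* Request a 1 s period; the driver rejects any perout flags. */
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 1;
	req.period.nsec = 0;
	req.flags = 0;
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		perror("PTP_PEROUT_REQUEST");

	close(fd);
	return 0;
}
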
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGC_PTP_TX_TIMEOUT); + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. + */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + igc_ptp_tx_timeout(adapter); + } +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + struct igc_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (WARN_ON_ONCE(!skb)) + return; + + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. 
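
igc_ptp_set_timestamp_mode() above is where a SIOCSHWTSTAMP request ultimately lands; from user space the mode is selected with a struct hwtstamp_config passed through the interface ioctl. A minimal sketch; the interface name eth0 is an assumption.

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* enable TX timestamping */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* the driver reports ALL anyway */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}
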
+ */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + */ +static void igc_ptp_tx_work(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, struct igc_adapter, + ptp_tx_work); + struct igc_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + tsynctxctl = rd32(IGC_TSYNCTXCTL); + if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0))) + return; + + igc_ptp_tx_hwtstamp(adapter); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. 
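
The timestamp that igc_ptp_tx_hwtstamp() hands to skb_tstamp_tx() above reaches the sending application on its socket error queue. A reduced sketch of that user-space side; the example_* helpers are illustrative, and creating the socket and sending the timestamped packet are omitted.

#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

/* Enable hardware TX timestamps on an already-created socket. */
static int example_enable_tx_tstamp(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

/* After a send, read the error queue and extract the hardware stamp. */
static int example_read_tx_tstamp(int fd, struct timespec *hwts)
{
	char data[256], control[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *tss =
				(struct scm_timestamping *)CMSG_DATA(cmsg);

			*hwts = tss->ts[2];	/* raw hardware timestamp */
			return 0;
		}
	}

	errno = EAGAIN;
	return -1;
}
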
+ */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + int i; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + if (pci_device_is_present(adapter->pdev)) + igc_ptp_time_save(adapter); +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. 
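
igc_ptp_init() above registers the clock with ptp_clock_register(), which exposes it as /dev/ptpN; user space can then read the PHC directly by turning the open file descriptor into a dynamic clock id. A small sketch; /dev/ptp0 is an assumption and depends on the index the kernel assigned.

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
		perror("clock_gettime");
	else
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	close(fd);
	return 0;
}
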
+ **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_ptp-5.14-orig.c b/devices/igc/igc_ptp-5.14-orig.c new file mode 100644 index 00000000..4ae19c6a --- /dev/null +++ b/devices/igc/igc_ptp-5.14-orig.c @@ -0,0 +1,914 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" + +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. */ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + unsigned long flags; + + 
spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + /* For now, 
always select timer 0 as source. */ + wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + **/ +static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGC_PTP_TX_TIMEOUT); + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. + */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + igc_ptp_tx_timeout(adapter); + } +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + struct igc_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (WARN_ON_ONCE(!skb)) + return; + + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. 
+ */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + */ +static void igc_ptp_tx_work(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, struct igc_adapter, + ptp_tx_work); + struct igc_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + tsynctxctl = rd32(IGC_TSYNCTXCTL); + if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0))) + return; + + igc_ptp_tx_hwtstamp(adapter); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. 
+ */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + int i; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + if (pci_device_is_present(adapter->pdev)) + igc_ptp_time_save(adapter); +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. 
+ **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_ptp-6.1-ethercat.c b/devices/igc/igc_ptp-6.1-ethercat.c new file mode 100644 index 00000000..d6b9b223 --- /dev/null +++ b/devices/igc/igc_ptp-6.1-ethercat.c @@ -0,0 +1,1120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-6.1-ethercat.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. */ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct 
igc_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | + IGC_TSAUXC_ST1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | + IGC_TSAUXC_ST0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec 
= ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + /* For now, always select timer 0 as source. */ + wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. + **/ +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + return -EINVAL; + } + return 0; +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGC_PTP_TX_TIMEOUT); + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. + */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + igc_ptp_tx_timeout(adapter); + } +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + struct igc_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (WARN_ON_ONCE(!skb)) + return; + + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. 
+ */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + */ +static void igc_ptp_tx_work(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, struct igc_adapter, + ptp_tx_work); + struct igc_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + tsynctxctl = rd32(IGC_TSYNCTXCTL); + if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0))) + return; + + igc_ptp_tx_hwtstamp(adapter); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have an way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + if (!IS_ENABLED(CONFIG_X86_TSC)) + return false; + + /* FIXME: it was noticed that enabling support for PCIe PTM in + * some i225-V models could cause lockups when bringing the + * interface up/down. There should be no downsides to + * disabling crosstimestamping support for i225-V, as it + * doesn't have any PTP support. That way we gain some time + * while root causing the issue. 
+ */ + if (adapter->pdev->device == IGC_DEV_ID_I225_V) + return false; + + return pcie_ptm_enabled(adapter->pdev); +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. + */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. + */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. 
+ */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + int i; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. 
+ */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + if (pci_device_is_present(adapter->pdev)) { + igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; + unsigned long flags; + u32 timadj; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_ptp-6.1-orig.c b/devices/igc/igc_ptp-6.1-orig.c new file mode 100644 index 00000000..4e10ced7 --- /dev/null +++ b/devices/igc/igc_ptp-6.1-orig.c @@ -0,0 +1,1120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. 
*/ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? 
ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | + IGC_TSAUXC_ST1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | + IGC_TSAUXC_ST0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec 
= ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + /* For now, always select timer 0 as source. */ + wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. + **/ +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + return -EINVAL; + } + return 0; +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
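+ * + * For illustration only (not part of this patch): userspace normally reaches + * this path through the SIOCSHWTSTAMP ioctl. A minimal sketch, in which the + * socket descriptor fd and the interface name "eth0" are assumptions, could + * look like this (using <linux/net_tstamp.h> and <linux/sockios.h>): + * + *   struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON, + *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT }; + *   struct ifreq ifr = { 0 }; + *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); + *   ifr.ifr_data = (void *)&cfg; + *   ioctl(fd, SIOCSHWTSTAMP, &ifr); + * + * On return, cfg.rx_filter reads back as HWTSTAMP_FILTER_ALL because the + * hardware timestamps every received packet once Rx timestamping is enabled.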
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGC_PTP_TX_TIMEOUT); + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. + */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + igc_ptp_tx_timeout(adapter); + } +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + struct igc_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (WARN_ON_ONCE(!skb)) + return; + + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. 
+ */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + */ +static void igc_ptp_tx_work(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, struct igc_adapter, + ptp_tx_work); + struct igc_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + tsynctxctl = rd32(IGC_TSYNCTXCTL); + if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0))) + return; + + igc_ptp_tx_hwtstamp(adapter); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have a way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + if (!IS_ENABLED(CONFIG_X86_TSC)) + return false; + + /* FIXME: it was noticed that enabling support for PCIe PTM in + * some i225-V models could cause lockups when bringing the + * interface up/down. There should be no downsides to + * disabling crosstimestamping support for i225-V, as it + * doesn't have any PTP support. That way we gain some time + * while root causing the issue.
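+ * + * Note that only the I225-V device ID is excluded by the check below; other + * device IDs keep cross timestamping whenever the conditions above are met.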
+ */ + if (adapter->pdev->device == IGC_DEV_ID_I225_V) + return false; + + return pcie_ptm_enabled(adapter->pdev); +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. + */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. + */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. 
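+ * + * Until such a capability register exists, the upper 32 bits read from + * IGC_PTM_CURR_T2_H are byte-swapped unconditionally below before the two + * halves are combined.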
+ */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + int i; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. 
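+ * + * Any Tx timestamp skb still pending is freed and the + * __IGC_PTP_TX_IN_PROGRESS bit is released, so that a stale timestamp is not + * delivered after resume.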
+ */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + + if (pci_device_is_present(adapter->pdev)) { + igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; + unsigned long flags; + u32 timadj; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. 
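For i225 this restores the PHC time saved by igc_ptp_time_save() and advances it by the time spent in reset; the fallback branch re-seeds the timecounter from the wall clock instead. 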
*/ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_regs-5.14-ethercat.h b/devices/igc/igc_regs-5.14-ethercat.h new file mode 100644 index 00000000..0f829905 --- /dev/null +++ b/devices/igc/igc_regs-5.14-ethercat.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ +#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. 
Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* MSI-X Table Register Descriptions */ +#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer 
Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define 
IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ + +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) 
(igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#endif diff --git a/devices/igc/igc_regs-5.14-orig.h b/devices/igc/igc_regs-5.14-orig.h new file mode 100644 index 00000000..0f829905 --- /dev/null +++ b/devices/igc/igc_regs-5.14-orig.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ +#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. 
Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* MSI-X Table Register Descriptions */ +#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer 
Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define 
IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ + +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) 
(igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#endif diff --git a/devices/igc/igc_regs-6.1-ethercat.h b/devices/igc/igc_regs-6.1-ethercat.h new file mode 100644 index 00000000..c0d82141 --- /dev/null +++ b/devices/igc/igc_regs-6.1-ethercat.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ +#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. 
Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - 
R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host 
*/ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ + +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */ 
+#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#define IGC_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igc/igc_regs-6.1-orig.h b/devices/igc/igc_regs-6.1-orig.h new file mode 100644 index 00000000..c0d82141 --- /dev/null +++ b/devices/igc/igc_regs-6.1-orig.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ +#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define 
IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 
/* VLAN Priority Queue Filter - RW */ + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define 
IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register 
- RW */ + +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */ +#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#define IGC_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igc/igc_tsn-5.14-ethercat.c b/devices/igc/igc_tsn-5.14-ethercat.c new file mode 100644 index 00000000..d83b896e --- /dev/null +++ b/devices/igc/igc_tsn-5.14-ethercat.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-5.14-ethercat.h" +#include "igc_tsn-5.14-ethercat.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +/* Returns the TSN specific registers to their default values after + * TSN offloading is disabled. 
+ */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)) + return 0; + + adapter->cycle_time = 0; + + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV); + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = 0; + ring->launchtime_enable = false; + + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) + return 0; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + if (adapter->base_time) { + /* If we have a base_time we are in "taprio" + * mode and we need to be strict about the + * cycles: only transmit a packet if it can be + * completed during that cycle. + */ + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + } + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + wr32(IGC_TXQCTL(i), txqctl); + } + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + + if (ktime_compare(systim, base_time) > 0) { + s64 n; + + n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + } + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + + wr32(IGC_BASET_H, baset_h); + wr32(IGC_BASET_L, baset_l); + + adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter); + + if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled) + return 0; + + if (!is_any_enabled) { + int err = igc_tsn_disable_offload(adapter); + + if (err < 0) + return err; + + /* The BASET registers aren't cleared when writing + * into them, force a reset if the interface is + * running. 
+ */ + if (netif_running(adapter->netdev)) + schedule_work(&adapter->reset_task); + + return 0; + } + + return igc_tsn_enable_offload(adapter); +} diff --git a/devices/igc/igc_tsn-5.14-ethercat.h b/devices/igc/igc_tsn-5.14-ethercat.h new file mode 100644 index 00000000..f76bc86d --- /dev/null +++ b/devices/igc/igc_tsn-5.14-ethercat.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_tsn-5.14-orig.c b/devices/igc/igc_tsn-5.14-orig.c new file mode 100644 index 00000000..174103c4 --- /dev/null +++ b/devices/igc/igc_tsn-5.14-orig.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" +#include "igc_tsn.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +/* Returns the TSN specific registers to their default values after + * TSN offloading is disabled. + */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)) + return 0; + + adapter->cycle_time = 0; + + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV); + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = 0; + ring->launchtime_enable = false; + + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) + return 0; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + if (adapter->base_time) { + /* If we have a base_time we are in "taprio" + * mode and we need to be strict about the + * cycles: only transmit a packet if it can be + * completed during that cycle. 
+ */ + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + } + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + wr32(IGC_TXQCTL(i), txqctl); + } + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + + if (ktime_compare(systim, base_time) > 0) { + s64 n; + + n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + } + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + + wr32(IGC_BASET_H, baset_h); + wr32(IGC_BASET_L, baset_l); + + adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter); + + if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled) + return 0; + + if (!is_any_enabled) { + int err = igc_tsn_disable_offload(adapter); + + if (err < 0) + return err; + + /* The BASET registers aren't cleared when writing + * into them, force a reset if the interface is + * running. + */ + if (netif_running(adapter->netdev)) + schedule_work(&adapter->reset_task); + + return 0; + } + + return igc_tsn_enable_offload(adapter); +} diff --git a/devices/igc/igc_tsn-5.14-orig.h b/devices/igc/igc_tsn-5.14-orig.h new file mode 100644 index 00000000..f76bc86d --- /dev/null +++ b/devices/igc/igc_tsn-5.14-orig.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_tsn-6.1-ethercat.c b/devices/igc/igc_tsn-6.1-ethercat.c new file mode 100644 index 00000000..ec188458 --- /dev/null +++ b/devices/igc/igc_tsn-6.1-ethercat.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-6.1-ethercat.h" +#include "igc_tsn-6.1-ethercat.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->qbv_enable) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + +/* Returns the TSN specific registers to their default values after + * the adapter is reset. 
+ */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV); + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, 0); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. 
+ */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: + wr32(IGC_TXQCTL(i), txqctl); + } + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + + if (ktime_compare(systim, base_time) > 0) { + s64 n; + + n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + } + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + + wr32(IGC_BASET_H, baset_h); + wr32(IGC_BASET_L, baset_l); + + return 0; +} + +int igc_tsn_reset(struct igc_adapter *adapter) +{ + unsigned int new_flags; + int err = 0; + + new_flags = igc_tsn_new_flags(adapter); + + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = new_flags; + + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + int err; + + if (netif_running(adapter->netdev)) { + schedule_work(&adapter->reset_task); + return 0; + } + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = igc_tsn_new_flags(adapter); + return 0; +} diff --git a/devices/igc/igc_tsn-6.1-ethercat.h b/devices/igc/igc_tsn-6.1-ethercat.h new file mode 100644 index 00000000..1512307f --- /dev/null +++ b/devices/igc/igc_tsn-6.1-ethercat.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_tsn-6.1-orig.c b/devices/igc/igc_tsn-6.1-orig.c new file mode 100644 index 00000000..356c7455 --- /dev/null +++ b/devices/igc/igc_tsn-6.1-orig.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" +#include "igc_tsn.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->qbv_enable) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + +/* Returns the TSN specific registers to their default values after + * the adapter is reset. 
+ */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV); + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, 0); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. 
+ */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: + wr32(IGC_TXQCTL(i), txqctl); + } + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + + if (ktime_compare(systim, base_time) > 0) { + s64 n; + + n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + } + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + + wr32(IGC_BASET_H, baset_h); + wr32(IGC_BASET_L, baset_l); + + return 0; +} + +int igc_tsn_reset(struct igc_adapter *adapter) +{ + unsigned int new_flags; + int err = 0; + + new_flags = igc_tsn_new_flags(adapter); + + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = new_flags; + + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + int err; + + if (netif_running(adapter->netdev)) { + schedule_work(&adapter->reset_task); + return 0; + } + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = igc_tsn_new_flags(adapter); + return 0; +} diff --git a/devices/igc/igc_tsn-6.1-orig.h b/devices/igc/igc_tsn-6.1-orig.h new file mode 100644 index 00000000..1512307f --- /dev/null +++ b/devices/igc/igc_tsn-6.1-orig.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_xdp-5.14-ethercat.c b/devices/igc/igc_xdp-5.14-ethercat.c new file mode 100644 index 00000000..bb928243 --- /dev/null +++ b/devices/igc/igc_xdp-5.14-ethercat.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include +#include + +#include "igc-5.14-ethercat.h" +#include "igc_xdp-5.14-ethercat.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. 
+ */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. + */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-5.14-ethercat.h b/devices/igc/igc_xdp-5.14-ethercat.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-5.14-ethercat.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. 
*/ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/igc_xdp-5.14-orig.c b/devices/igc/igc_xdp-5.14-orig.c new file mode 100644 index 00000000..a8cf5374 --- /dev/null +++ b/devices/igc/igc_xdp-5.14-orig.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include + +#include "igc.h" +#include "igc_xdp.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. + */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. 
*/ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-5.14-orig.h b/devices/igc/igc_xdp-5.14-orig.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-5.14-orig.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. */ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/igc_xdp-6.1-ethercat.c b/devices/igc/igc_xdp-6.1-ethercat.c new file mode 100644 index 00000000..686644c1 --- /dev/null +++ b/devices/igc/igc_xdp-6.1-ethercat.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include +#include + +#include "igc-6.1-ethercat.h" +#include "igc_xdp-6.1-ethercat.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. 
+ */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. + */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-6.1-ethercat.h b/devices/igc/igc_xdp-6.1-ethercat.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-6.1-ethercat.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. 
*/ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/igc_xdp-6.1-orig.c b/devices/igc/igc_xdp-6.1-orig.c new file mode 100644 index 00000000..aeeb34e6 --- /dev/null +++ b/devices/igc/igc_xdp-6.1-orig.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include +#include + +#include "igc.h" +#include "igc_xdp.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. + */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. 
*/ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-6.1-orig.h b/devices/igc/igc_xdp-6.1-orig.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-6.1-orig.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. 
*/ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/update.sh b/devices/igc/update.sh index e20d2258..e9d81b38 100755 --- a/devices/igc/update.sh +++ b/devices/igc/update.sh @@ -1,7 +1,5 @@ #!/bin/bash -set -e - if [ $# -ne 3 ]; then echo "Need 3 arguments: 1) kernel source dir, 2) previous version, 3) version to add" exit 1 @@ -13,10 +11,8 @@ KERNELVER=$3 IGCDIR=drivers/net/ethernet/intel/igc -FILES="e1000_82575.c e1000_82575.h e1000_defines.h e1000_hw.h e1000_i210.c e1000_i210.h e1000_mac.c e1000_mac.h e1000_mbx.c e1000_mbx.h e1000_nvm.c e1000_nvm.h e1000_phy.c e1000_phy.h e1000_regs.h igc_ethtool.c igb.h igb_hwmon.c igb_main.c igb_ptp.c" -FILES="igc_base.c igc_diag.c igc_ethtool.c igc_i225.c igc_mac.h igc_nvm.c igc_phy.h igc_tsn.c igc_xdp.h igc_base.h igc_diag.h igc.h igc_i225.h igc_main.c igc_nvm.h igc_ptp.c igc_tsn.h igc_defines.h igc_dump.c igc_hw.h igc_mac.c igc_phy.c igc_regs.h igc_xdp.c" - -set -x +FILES="igc_base.c igc_defines.h igc_diag.h igc_ethtool.c igc_hw.h igc_i225.h igc_mac.h igc_nvm.c igc_phy.c igc_ptp.c igc_tsn.c igc_xdp.c" +FILES="$FILES igc_base.h igc_diag.c igc_dump.c igc.h igc_i225.c igc_mac.c igc_main.c igc_nvm.h igc_phy.h igc_regs.h igc_tsn.h igc_xdp.h" for f in $FILES; do echo $f @@ -27,6 +23,11 @@ for f in $FILES; do cp -v $o $e op=${f/\./-$PREVER-orig.} ep=${f/\./-$PREVER-ethercat.} - diff -u $op $ep | patch -p1 $e + diff -up $op $ep | patch -p1 --no-backup-if-mismatch $e + sed -i s/$PREVER-ethercat.h/$KERNELVER-ethercat.h/ $e git add $o $e + echo -e "\t$e \\" >> Makefile.am + echo -e "\t$o \\" >> Makefile.am done + +echo "Don't forget to update Makefile.am!" 
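The igc_xdp_enable_pool() hunk above refuses an XSK buffer pool whose per-chunk frame size cannot hold a maximum-size Ethernet frame with two VLAN tags, because the driver cannot split one frame across several buffers while XDP is active. The arithmetic behind that check, as a minimal userspace sketch — the two constants mirror the usual kernel values, and the 2048-byte chunk is only an example, not a value taken from this patch:

    /* Minimal sketch of the XSK frame-size check used in igc_xdp_enable_pool().
     * ETH_FRAME_LEN and VLAN_HLEN mirror the kernel's <linux/if_ether.h> and
     * <linux/if_vlan.h> values; the chunk size below is an arbitrary example. */
    #include <stdio.h>

    #define ETH_FRAME_LEN 1514   /* 1500-byte payload + 14-byte header, no FCS */
    #define VLAN_HLEN     4      /* one 802.1Q tag */

    static int xsk_frame_size_ok(unsigned int frame_size)
    {
        /* must fit a full frame plus two VLAN tags (QinQ), as in the driver */
        return frame_size >= ETH_FRAME_LEN + VLAN_HLEN * 2;
    }

    int main(void)
    {
        unsigned int example_chunk = 2048;   /* a common XDP UMEM chunk size */

        printf("minimum accepted frame size: %d\n",
               ETH_FRAME_LEN + VLAN_HLEN * 2);          /* prints 1522 */
        printf("chunk of %u bytes accepted: %s\n", example_chunk,
               xsk_frame_size_ok(example_chunk) ? "yes" : "no");
        return 0;
    }

With a typical 2048-byte chunk the test passes; only pools with chunks smaller than 1522 bytes are rejected with -EOPNOTSUPP.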
diff --git a/devices/r8169/Kbuild.in b/devices/r8169/Kbuild.in index b9857d7a..1e55f6ef 100644 --- a/devices/r8169/Kbuild.in +++ b/devices/r8169/Kbuild.in @@ -41,6 +41,10 @@ ifeq (@R8169_IN_SUBDIR@,1) r8169_phy_config-@KERNEL_R8169@-ethercat.o CFLAGS_r8169_main-@KERNEL_R8169@-ethercat.o = -DREV=$(REV) + +ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y := -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING +endif endif endif diff --git a/devices/r8169/r8169_main-5.14-ethercat.c b/devices/r8169/r8169_main-5.14-ethercat.c index 51c54f47..287bda91 100644 --- a/devices/r8169/r8169_main-5.14-ethercat.c +++ b/devices/r8169/r8169_main-5.14-ethercat.c @@ -652,11 +652,20 @@ struct rtl8169_private { u32 ocp_base; - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct rtl8169_private *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); MODULE_AUTHOR("Realtek and the Linux r8169 crew "); @@ -1329,7 +1338,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp) static void rtl_irq_enable(struct rtl8169_private *tp) { - if (tp->ecdev) + if (get_ecdev(tp)) return; if (rtl_is_8125(tp)) RTL_W32(tp, IntrMask_8125, tp->irq_mask); @@ -2231,7 +2240,7 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { set_bit(flag, tp->wk.flags); - if (!tp->ecdev) + if (!get_ecdev(tp)) schedule_work(&tp->wk.work); } @@ -3973,7 +3982,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, struct sk_buff *skb = tx_skb->skb; rtl8169_unmap_tx_skb(tp, entry); - if (!tp->ecdev && skb) + if (!get_ecdev(tp) && skb) dev_consume_skb_any(skb); } } @@ -3982,13 +3991,13 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, static void rtl8169_tx_clear(struct rtl8169_private *tp) { rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); - if (!tp->ecdev) + if (!get_ecdev(tp)) netdev_reset_queue(tp->dev); } static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) { - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_disable(&tp->napi); /* Give a racing hard_start_xmit a few cycles to complete. 
*/ @@ -4032,7 +4041,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) { int i; - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(tp->dev); rtl8169_cleanup(tp, false); @@ -4040,7 +4049,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i); - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_enable(&tp->napi); rtl_hw_start(tp); } @@ -4329,7 +4338,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, /* Force memory writes to complete before releasing descriptor */ dma_wmb(); - door_bell = tp->ecdev || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + door_bell = get_ecdev(tp) || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); @@ -4338,7 +4347,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); - stop_queue = !tp->ecdev && !rtl_tx_slots_avail(tp); + stop_queue = !get_ecdev(tp) && !rtl_tx_slots_avail(tp); if (unlikely(stop_queue)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. @@ -4366,13 +4375,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, err_dma_1: rtl8169_unmap_tx_skb(tp, entry); err_dma_0: - if (!tp->ecdev) + if (!get_ecdev(tp)) dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; err_stop_0: - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(dev); dev->stats.tx_dropped++; return NETDEV_TX_BUSY; @@ -4479,14 +4488,14 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, if (skb) { pkts_compl++; bytes_compl += skb->len; - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_consume_skb(skb, budget); } dirty_tx++; } if (tp->dirty_tx != dirty_tx) { - if (!tp->ecdev) { + if (!get_ecdev(tp)) { netdev_completed_queue(dev, pkts_compl, bytes_compl); dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); } @@ -4499,7 +4508,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, * ring status. */ smp_store_mb(tp->dirty_tx, dirty_tx); - if (!tp->ecdev && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + if (!get_ecdev(tp) && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) netif_wake_queue(dev); /* * 8168 hack: TxPoll requests are lost when the Tx packets are @@ -4581,7 +4590,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget goto release_descriptor; } - if (!tp->ecdev) { + if (!get_ecdev(tp)) { skb = napi_alloc_skb(&tp->napi, pkt_size); if (unlikely(!skb)) { dev->stats.rx_dropped++; @@ -4596,8 +4605,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); prefetch(rx_buf); - if (tp->ecdev) { - ecdev_receive(tp->ecdev, rx_buf, pkt_size); + if (get_ecdev(tp)) { + ecdev_receive(get_ecdev(tp), rx_buf, pkt_size); // No need to detect link status as // long as frames are received: Reset watchdog. 
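The r8169 hunks above route every former tp->ecdev read through get_ecdev(), which warns if the field is read before rtl_init_one() has assigned it via ecdev_offer(); the check is only compiled in when the new --enable-driver-resource-verifying switch defines EC_ENABLE_DRIVER_RESOURCE_VERIFYING. A minimal userspace sketch of the same guarded-accessor idea, assuming an assert() in place of the kernel's WARN_ON(); the struct and field names below are illustrative, not the driver's:

    /* Illustrative version of the guarded-accessor pattern: reading the
     * resource before it has been assigned trips an assertion when
     * verification is compiled in, and is a plain read otherwise. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define VERIFY_RESOURCES 1   /* stands in for EC_ENABLE_DRIVER_RESOURCE_VERIFYING */

    struct adapter {
        void *ecdev_;            /* resource handle; NULL is a legal value */
        bool  ecdev_initialized; /* set once the handle has been assigned */
    };

    static void *get_ecdev(struct adapter *a)
    {
    #if VERIFY_RESOURCES
        assert(a->ecdev_initialized);   /* the kernel code uses WARN_ON() */
    #endif
        return a->ecdev_;
    }

    int main(void)
    {
        struct adapter a = { .ecdev_ = NULL, .ecdev_initialized = false };

        /* probe path: assign the handle, then mark it valid */
        a.ecdev_ = NULL;              /* e.g. the offer was declined */
        a.ecdev_initialized = true;

        printf("EtherCAT handle %s\n", get_ecdev(&a) ? "present" : "absent");
        return 0;
    }

NULL stays a legal value — it simply means the master declined the device — so the guard verifies the ordering of the assignment, not the pointer itself.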
@@ -4610,7 +4619,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget } dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { rtl8169_rx_csum(skb, status); skb->protocol = eth_type_trans(skb, dev); @@ -4756,7 +4765,7 @@ static void rtl8169_up(struct rtl8169_private *tp) phy_init_hw(tp->phydev); phy_resume(tp->phydev); rtl8169_init_phy(tp); - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_enable(&tp->napi); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); rtl_reset_work(tp); @@ -4771,14 +4780,14 @@ static int rtl8169_close(struct net_device *dev) pm_runtime_get_sync(&pdev->dev); - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(dev); rtl8169_down(tp); rtl8169_rx_clear(tp); cancel_work_sync(&tp->wk.work); - if (!tp->ecdev) + if (!get_ecdev(tp)) free_irq(pci_irq_vector(pdev, 0), tp); phy_disconnect(tp->phydev); @@ -4834,7 +4843,7 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, irqflags, dev->name, tp); if (retval < 0) @@ -4847,10 +4856,10 @@ static int rtl_open(struct net_device *dev) rtl8169_up(tp); rtl8169_init_counter_offsets(tp); - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_start_queue(dev); else - ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + ecdev_set_link(get_ecdev(tp), netif_carrier_ok(dev)); out: pm_runtime_put_sync(&pdev->dev); @@ -4936,7 +4945,7 @@ static int rtl8169_runtime_resume(struct device *dev) static int __maybe_unused rtl8169_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -4952,7 +4961,7 @@ static int __maybe_unused rtl8169_suspend(struct device *device) static int __maybe_unused rtl8169_resume(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -4969,7 +4978,7 @@ static int __maybe_unused rtl8169_resume(struct device *device) static int rtl8169_runtime_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -5048,10 +5057,10 @@ static void rtl_remove_one(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); - if (tp->ecdev) { - ecdev_close(tp->ecdev); + if (get_ecdev(tp)) { + ecdev_close(get_ecdev(tp)); irq_work_sync(&tp->ec_watchdog_kicker); - ecdev_withdraw(tp->ecdev); + ecdev_withdraw(get_ecdev(tp)); } else { unregister_netdev(tp->dev); } @@ -5320,7 +5329,7 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp) static void rtl_init_mac_address(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; - u8 *mac_addr = dev->dev_addr; + u8 mac_addr[ETH_ALEN]; int rc; rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); @@ -5338,6 +5347,12 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) eth_hw_addr_random(dev); dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); done: +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5 + eth_hw_addr_set(dev, mac_addr); +#else + memcpy(dev->dev_addr, mac_addr, sizeof(mac_addr)); +#endif + rtl_rar_set(tp, mac_addr); } @@ -5355,7 +5370,7 @@ static void ec_poll(struct net_device *dev) u16 status = rtl_get_events(tp); if (jiffies - tp->ec_watchdog_jiffies >= 2 * HZ) { - 
ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + ecdev_set_link(get_ecdev(tp), netif_carrier_ok(dev)); tp->ec_watchdog_jiffies = jiffies; } @@ -5388,6 +5403,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(dev, &pdev->dev); dev->netdev_ops = &rtl_netdev_ops; tp = netdev_priv(dev); + tp->ecdev_initialized = false; tp->dev = dev; tp->pci_dev = pdev; tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; @@ -5544,10 +5560,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_ = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_initialized = true; tp->ec_watchdog_jiffies = jiffies; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { rc = register_netdev(dev); if (rc) return rc; @@ -5570,11 +5587,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev); - if (tp->ecdev) { + if (get_ecdev(tp)) { init_irq_work(&tp->ec_watchdog_kicker, ec_kick_watchdog); - rc = ecdev_open(tp->ecdev); + rc = ecdev_open(get_ecdev(tp)); if (rc) { - ecdev_withdraw(tp->ecdev); + ecdev_withdraw(get_ecdev(tp)); return rc; } } diff --git a/devices/r8169/r8169_main-5.15-ethercat.c b/devices/r8169/r8169_main-5.15-ethercat.c index df956257..0e1f88f9 100644 --- a/devices/r8169/r8169_main-5.15-ethercat.c +++ b/devices/r8169/r8169_main-5.15-ethercat.c @@ -640,11 +640,20 @@ struct rtl8169_private { u32 ocp_base; - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct rtl8169_private *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); MODULE_AUTHOR("Realtek and the Linux r8169 crew "); @@ -1317,7 +1326,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp) static void rtl_irq_enable(struct rtl8169_private *tp) { - if (tp->ecdev) + if (get_ecdev(tp)) return; if (rtl_is_8125(tp)) RTL_W32(tp, IntrMask_8125, tp->irq_mask); @@ -2203,7 +2212,7 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { set_bit(flag, tp->wk.flags); - if (!tp->ecdev) + if (!get_ecdev(tp)) schedule_work(&tp->wk.work); } @@ -3966,7 +3975,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, struct sk_buff *skb = tx_skb->skb; rtl8169_unmap_tx_skb(tp, entry); - if (!tp->ecdev && skb) + if (!get_ecdev(tp) && skb) dev_consume_skb_any(skb); } } @@ -3975,13 +3984,13 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, static void rtl8169_tx_clear(struct rtl8169_private *tp) { rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); - if (!tp->ecdev) + if (!get_ecdev(tp)) netdev_reset_queue(tp->dev); } static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) { - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_disable(&tp->napi); /* Give a racing hard_start_xmit a few cycles to complete. 
*/ @@ -4025,7 +4034,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) { int i; - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(tp->dev); rtl8169_cleanup(tp, false); @@ -4033,7 +4042,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i); - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_enable(&tp->napi); rtl_hw_start(tp); } @@ -4322,7 +4331,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, /* Force memory writes to complete before releasing descriptor */ dma_wmb(); - door_bell = tp->ecdev || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + door_bell = get_ecdev(tp) || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); @@ -4331,7 +4340,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); - stop_queue = !tp->ecdev && !rtl_tx_slots_avail(tp); + stop_queue = !get_ecdev(tp) && !rtl_tx_slots_avail(tp); if (unlikely(stop_queue)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. @@ -4359,13 +4368,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, err_dma_1: rtl8169_unmap_tx_skb(tp, entry); err_dma_0: - if (!tp->ecdev) + if (!get_ecdev(tp)) dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; err_stop_0: - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(dev); dev->stats.tx_dropped++; return NETDEV_TX_BUSY; @@ -4472,14 +4481,14 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, if (skb) { pkts_compl++; bytes_compl += skb->len; - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_consume_skb(skb, budget); } dirty_tx++; } if (tp->dirty_tx != dirty_tx) { - if (!tp->ecdev) { + if (!get_ecdev(tp)) { netdev_completed_queue(dev, pkts_compl, bytes_compl); dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); } @@ -4492,7 +4501,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, * ring status. */ smp_store_mb(tp->dirty_tx, dirty_tx); - if (!tp->ecdev && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + if (!get_ecdev(tp) && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) netif_wake_queue(dev); /* * 8168 hack: TxPoll requests are lost when the Tx packets are @@ -4574,7 +4583,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget goto release_descriptor; } - if (!tp->ecdev) { + if (!get_ecdev(tp)) { skb = napi_alloc_skb(&tp->napi, pkt_size); if (unlikely(!skb)) { dev->stats.rx_dropped++; @@ -4589,8 +4598,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); prefetch(rx_buf); - if (tp->ecdev) { - ecdev_receive(tp->ecdev, rx_buf, pkt_size); + if (get_ecdev(tp)) { + ecdev_receive(get_ecdev(tp), rx_buf, pkt_size); // No need to detect link status as // long as frames are received: Reset watchdog. 
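In the receive hunk just above, frames on an EtherCAT-attached port are handed to ecdev_receive() instead of entering the normal skb/NAPI path, and, per the comment, each received frame resets the 2-second watchdog that otherwise makes ec_poll() re-report the carrier state via ecdev_set_link(). A rough userspace sketch of that watchdog logic, assuming a wall-clock time() stand-in for jiffies and placeholder printouts instead of the master API:

    /* Sketch of the ec_poll() link watchdog: any received frame refreshes the
     * timestamp; only after 2 silent seconds is the carrier state re-read and
     * reported. The driver uses jiffies and ecdev_set_link() instead. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define LINK_WATCHDOG_SECS 2    /* mirrors the driver's "2 * HZ" */

    static time_t last_rx;          /* updated whenever a frame is received */

    static void on_frame_received(void)
    {
        last_rx = time(NULL);       /* driver: tp->ec_watchdog_jiffies = jiffies */
    }

    static void poll_link(bool carrier_ok)
    {
        if (time(NULL) - last_rx >= LINK_WATCHDOG_SECS) {
            /* driver: ecdev_set_link(get_ecdev(tp), netif_carrier_ok(dev)) */
            printf("reporting link %s to the master\n", carrier_ok ? "up" : "down");
            last_rx = time(NULL);
        }
    }

    int main(void)
    {
        on_frame_received();   /* traffic keeps the watchdog quiet */
        poll_link(true);       /* prints nothing: a frame was just seen */
        last_rx -= 3;          /* pretend three silent seconds passed */
        poll_link(true);       /* now the link state is reported again */
        return 0;
    }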
@@ -4603,7 +4612,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget } dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { rtl8169_rx_csum(skb, status); skb->protocol = eth_type_trans(skb, dev); @@ -4749,7 +4758,7 @@ static void rtl8169_up(struct rtl8169_private *tp) phy_init_hw(tp->phydev); phy_resume(tp->phydev); rtl8169_init_phy(tp); - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_enable(&tp->napi); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); rtl_reset_work(tp); @@ -4764,14 +4773,14 @@ static int rtl8169_close(struct net_device *dev) pm_runtime_get_sync(&pdev->dev); - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(dev); rtl8169_down(tp); rtl8169_rx_clear(tp); cancel_work_sync(&tp->wk.work); - if (!tp->ecdev) + if (!get_ecdev(tp)) free_irq(pci_irq_vector(pdev, 0), tp); phy_disconnect(tp->phydev); @@ -4827,7 +4836,7 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, irqflags, dev->name, tp); if (retval < 0) @@ -4840,10 +4849,10 @@ static int rtl_open(struct net_device *dev) rtl8169_up(tp); rtl8169_init_counter_offsets(tp); - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_start_queue(dev); else - ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + ecdev_set_link(get_ecdev(tp), netif_carrier_ok(dev)); out: pm_runtime_put_sync(&pdev->dev); @@ -4929,7 +4938,7 @@ static int rtl8169_runtime_resume(struct device *dev) static int __maybe_unused rtl8169_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -4945,7 +4954,7 @@ static int __maybe_unused rtl8169_suspend(struct device *device) static int __maybe_unused rtl8169_resume(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -4962,7 +4971,7 @@ static int __maybe_unused rtl8169_resume(struct device *device) static int rtl8169_runtime_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -5041,10 +5050,10 @@ static void rtl_remove_one(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); - if (tp->ecdev) { - ecdev_close(tp->ecdev); + if (get_ecdev(tp)) { + ecdev_close(get_ecdev(tp)); irq_work_sync(&tp->ec_watchdog_kicker); - ecdev_withdraw(tp->ecdev); + ecdev_withdraw(get_ecdev(tp)); } else { unregister_netdev(tp->dev); } @@ -5348,7 +5357,7 @@ static void ec_poll(struct net_device *dev) u16 status = rtl_get_events(tp); if (jiffies - tp->ec_watchdog_jiffies >= 2 * HZ) { - ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + ecdev_set_link(get_ecdev(tp), netif_carrier_ok(dev)); tp->ec_watchdog_jiffies = jiffies; } @@ -5381,6 +5390,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(dev, &pdev->dev); dev->netdev_ops = &rtl_netdev_ops; tp = netdev_priv(dev); + tp->ecdev_initialized = false; tp->dev = dev; tp->pci_dev = pdev; tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 
0 : 1; @@ -5536,10 +5546,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_ = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_initialized = true; tp->ec_watchdog_jiffies = jiffies; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { rc = register_netdev(dev); if (rc) return rc; @@ -5562,11 +5573,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev); - if (tp->ecdev) { + if (get_ecdev(tp)) { init_irq_work(&tp->ec_watchdog_kicker, ec_kick_watchdog); - rc = ecdev_open(tp->ecdev); + rc = ecdev_open(get_ecdev(tp)); if (rc) { - ecdev_withdraw(tp->ecdev); + ecdev_withdraw(get_ecdev(tp)); return rc; } } diff --git a/devices/r8169/r8169_main-6.1-ethercat.c b/devices/r8169/r8169_main-6.1-ethercat.c index 06e02bc6..3f4a77ca 100644 --- a/devices/r8169/r8169_main-6.1-ethercat.c +++ b/devices/r8169/r8169_main-6.1-ethercat.c @@ -629,11 +629,20 @@ struct rtl8169_private { u32 ocp_base; - ec_device_t *ecdev; + ec_device_t *ecdev_; unsigned long ec_watchdog_jiffies; struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; }; +static inline ec_device_t *get_ecdev(struct rtl8169_private *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); MODULE_AUTHOR("Realtek and the Linux r8169 crew "); @@ -1269,7 +1278,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp) static void rtl_irq_enable(struct rtl8169_private *tp) { - if (tp->ecdev) + if (get_ecdev(tp)) return; if (rtl_is_8125(tp)) RTL_W32(tp, IntrMask_8125, tp->irq_mask); @@ -2159,7 +2168,7 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { set_bit(flag, tp->wk.flags); - if (!tp->ecdev) + if (!get_ecdev(tp)) schedule_work(&tp->wk.work); } @@ -3871,7 +3880,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, struct sk_buff *skb = tx_skb->skb; rtl8169_unmap_tx_skb(tp, entry); - if (!tp->ecdev && skb) + if (!get_ecdev(tp) && skb) dev_consume_skb_any(skb); } } @@ -3880,13 +3889,13 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, static void rtl8169_tx_clear(struct rtl8169_private *tp) { rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); - if (!tp->ecdev) + if (!get_ecdev(tp)) netdev_reset_queue(tp->dev); } static void rtl8169_cleanup(struct rtl8169_private *tp) { - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_disable(&tp->napi); /* Give a racing hard_start_xmit a few cycles to complete. 
*/ @@ -3926,7 +3935,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) { int i; - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(tp->dev); rtl8169_cleanup(tp); @@ -3934,7 +3943,7 @@ static void rtl_reset_work(struct rtl8169_private *tp) for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i); - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_enable(&tp->napi); rtl_hw_start(tp); } @@ -4221,7 +4230,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, /* Force memory writes to complete before releasing descriptor */ dma_wmb(); - door_bell = tp->ecdev || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + door_bell = get_ecdev(tp) || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); @@ -4230,7 +4239,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); - stop_queue = !tp->ecdev && !rtl_tx_slots_avail(tp); + stop_queue = !get_ecdev(tp) && !rtl_tx_slots_avail(tp); if (unlikely(stop_queue)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. @@ -4258,13 +4267,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, err_dma_1: rtl8169_unmap_tx_skb(tp, entry); err_dma_0: - if (!tp->ecdev) + if (!get_ecdev(tp)) dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; err_stop_0: - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(dev); dev->stats.tx_dropped++; return NETDEV_TX_BUSY; @@ -4370,14 +4379,14 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, if (skb) { pkts_compl++; bytes_compl += skb->len; - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_consume_skb(skb, budget); } dirty_tx++; } if (tp->dirty_tx != dirty_tx) { - if (!tp->ecdev) { + if (!get_ecdev(tp)) { netdev_completed_queue(dev, pkts_compl, bytes_compl); dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); } @@ -4390,7 +4399,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, * ring status. */ smp_store_mb(tp->dirty_tx, dirty_tx); - if (!tp->ecdev && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + if (!get_ecdev(tp) && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) netif_wake_queue(dev); /* * 8168 hack: TxPoll requests are lost when the Tx packets are @@ -4472,7 +4481,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget goto release_descriptor; } - if (!tp->ecdev) { + if (!get_ecdev(tp)) { skb = napi_alloc_skb(&tp->napi, pkt_size); if (unlikely(!skb)) { dev->stats.rx_dropped++; @@ -4487,8 +4496,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); prefetch(rx_buf); - if (tp->ecdev) { - ecdev_receive(tp->ecdev, rx_buf, pkt_size); + if (get_ecdev(tp)) { + ecdev_receive(get_ecdev(tp), rx_buf, pkt_size); // No need to detect link status as // long as frames are received: Reset watchdog. 
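The rtl_init_one() hunks shown above for the 5.14 and 5.15 drivers (and repeated below for 6.1) follow the usual device-takeover sequence: offer the probed netdev to the EtherCAT master, fall back to register_netdev() when the offer is declined (ecdev_offer() returns NULL), otherwise open the device for cyclic use and withdraw the offer again if that open fails. A control-flow sketch of that sequence — the *_stub() helpers below are invented for illustration and model only the outcomes, not the real master API:

    /* Control-flow sketch of the probe-time takeover used in rtl_init_one():
     * ecdev_offer() -> register_netdev() fallback, then ecdev_open() with
     * ecdev_withdraw() on failure. All functions here are local stubs. */
    #include <stdbool.h>
    #include <stdio.h>

    struct ecdev { int dummy; };

    static struct ecdev accepted_dev;

    /* stubbed outcomes, switchable for experimentation */
    static bool master_wants_device = true;
    static bool open_succeeds = true;

    static struct ecdev *ecdev_offer_stub(void)  { return master_wants_device ? &accepted_dev : NULL; }
    static int  register_netdev_stub(void)       { puts("registered as normal netdev"); return 0; }
    static int  ecdev_open_stub(struct ecdev *e) { (void)e; return open_succeeds ? 0 : -1; }
    static void ecdev_withdraw_stub(struct ecdev *e) { (void)e; puts("offer withdrawn"); }

    static int probe(void)
    {
        struct ecdev *ecdev = ecdev_offer_stub();

        if (!ecdev)
            return register_netdev_stub();   /* master declined: normal driver path */

        if (ecdev_open_stub(ecdev)) {        /* master accepted: open for cyclic use */
            ecdev_withdraw_stub(ecdev);      /* undo the offer so the probe fails cleanly */
            return -1;
        }

        puts("device attached to the EtherCAT master");
        return 0;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }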
@@ -4501,7 +4510,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget } dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); - if (!tp->ecdev) { + if (!get_ecdev(tp)) { rtl8169_rx_csum(skb, status); skb->protocol = eth_type_trans(skb, dev); @@ -4647,7 +4656,7 @@ static void rtl8169_up(struct rtl8169_private *tp) phy_init_hw(tp->phydev); phy_resume(tp->phydev); rtl8169_init_phy(tp); - if (!tp->ecdev) + if (!get_ecdev(tp)) napi_enable(&tp->napi); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); rtl_reset_work(tp); @@ -4662,14 +4671,14 @@ static int rtl8169_close(struct net_device *dev) pm_runtime_get_sync(&pdev->dev); - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_stop_queue(dev); rtl8169_down(tp); rtl8169_rx_clear(tp); cancel_work_sync(&tp->wk.work); - if (!tp->ecdev) + if (!get_ecdev(tp)) free_irq(tp->irq, tp); phy_disconnect(tp->phydev); @@ -4725,7 +4734,7 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, dev->name, tp); if (retval < 0) @@ -4738,10 +4747,10 @@ static int rtl_open(struct net_device *dev) rtl8169_up(tp); rtl8169_init_counter_offsets(tp); - if (!tp->ecdev) + if (!get_ecdev(tp)) netif_start_queue(dev); else - ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + ecdev_set_link(get_ecdev(tp), netif_carrier_ok(dev)); out: pm_runtime_put_sync(&pdev->dev); @@ -4825,7 +4834,7 @@ static int rtl8169_runtime_resume(struct device *dev) static int rtl8169_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -4841,7 +4850,7 @@ static int rtl8169_suspend(struct device *device) static int rtl8169_resume(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -4858,7 +4867,7 @@ static int rtl8169_resume(struct device *device) static int rtl8169_runtime_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->ecdev) { + if (get_ecdev(tp)) { return -EBUSY; } @@ -4919,10 +4928,10 @@ static void rtl_remove_one(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); - if (tp->ecdev) { - ecdev_close(tp->ecdev); + if (get_ecdev(tp)) { + ecdev_close(get_ecdev(tp)); irq_work_sync(&tp->ec_watchdog_kicker); - ecdev_withdraw(tp->ecdev); + ecdev_withdraw(get_ecdev(tp)); } else { unregister_netdev(tp->dev); } @@ -5206,7 +5215,7 @@ static void ec_poll(struct net_device *dev) u16 status = rtl_get_events(tp); if (jiffies - tp->ec_watchdog_jiffies >= 2 * HZ) { - ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + ecdev_set_link(get_ecdev(tp), netif_carrier_ok(dev)); tp->ec_watchdog_jiffies = jiffies; } @@ -5239,6 +5248,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(dev, &pdev->dev); dev->netdev_ops = &rtl_netdev_ops; tp = netdev_priv(dev); + tp->ecdev_initialized = false; tp->dev = dev; tp->pci_dev = pdev; tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 
0 : 1; @@ -5401,10 +5411,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_ = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ecdev_initialized = true; tp->ec_watchdog_jiffies = jiffies; - if (!tp->ecdev) { + if (!get_ecdev(tp)) { rc = register_netdev(dev); if (rc) return rc; @@ -5426,11 +5437,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev); - if (tp->ecdev) { + if (get_ecdev(tp)) { + rc = ecdev_open(get_ecdev(tp)); init_irq_work(&tp->ec_watchdog_kicker, ec_kick_watchdog); - rc = ecdev_open(tp->ecdev); if (rc) { - ecdev_withdraw(tp->ecdev); + ecdev_withdraw(get_ecdev(tp)); return rc; } } diff --git a/devices/stmmac/Kbuild.in b/devices/stmmac/Kbuild.in new file mode 100644 index 00000000..60564fb4 --- /dev/null +++ b/devices/stmmac/Kbuild.in @@ -0,0 +1,98 @@ +#----------------------------------------------------------------------------- +# +# $Id$ +# +# Copyright (C) 2006-2008 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. +# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --- +# +# The license mentioned above concerns the source code only. Using the +# EtherCAT technology and brand is only permitted in compliance with the +# industrial property and similar rights of Beckhoff Automation GmbH. +# +# --- +# +# vim: syntax=make +# +#----------------------------------------------------------------------------- + +TOPDIR := $(src)/../.. 
+ +REV := $(shell if test -s $(TOPDIR)/revision; then \ + cat $(TOPDIR)/revision; \ + else \ + git -C $(TOPDIR) describe 2>/dev/null || echo "unknown"; \ + fi) + +ifeq (@ENABLE_DRIVER_RESOURCE_VERIFYING@,1) + ccflags-y := -DEC_ENABLE_DRIVER_RESOURCE_VERIFYING +endif + +ifeq (@ENABLE_STMMAC@,1) +EC_STMMAC_OBJS := \ + chain_mode-@KERNEL_STMMAC@-ethercat.o \ + dwmac1000_core-@KERNEL_STMMAC@-ethercat.o \ + dwmac1000_dma-@KERNEL_STMMAC@-ethercat.o \ + dwmac100_core-@KERNEL_STMMAC@-ethercat.o \ + dwmac100_dma-@KERNEL_STMMAC@-ethercat.o \ + dwmac4_core-@KERNEL_STMMAC@-ethercat.o \ + dwmac4_descs-@KERNEL_STMMAC@-ethercat.o \ + dwmac4_dma-@KERNEL_STMMAC@-ethercat.o \ + dwmac4_lib-@KERNEL_STMMAC@-ethercat.o \ + dwmac5-@KERNEL_STMMAC@-ethercat.o \ + dwmac_lib-@KERNEL_STMMAC@-ethercat.o \ + dwxgmac2_core-@KERNEL_STMMAC@-ethercat.o \ + dwxgmac2_descs-@KERNEL_STMMAC@-ethercat.o \ + dwxgmac2_dma-@KERNEL_STMMAC@-ethercat.o \ + enh_desc-@KERNEL_STMMAC@-ethercat.o \ + hwif-@KERNEL_STMMAC@-ethercat.o \ + mmc_core-@KERNEL_STMMAC@-ethercat.o \ + norm_desc-@KERNEL_STMMAC@-ethercat.o \ + ring_mode-@KERNEL_STMMAC@-ethercat.o \ + stmmac_ethtool-@KERNEL_STMMAC@-ethercat.o \ + stmmac_hwtstamp-@KERNEL_STMMAC@-ethercat.o \ + stmmac_main-@KERNEL_STMMAC@-ethercat.o \ + stmmac_mdio-@KERNEL_STMMAC@-ethercat.o \ + stmmac_ptp-@KERNEL_STMMAC@-ethercat.o \ + stmmac_tc-@KERNEL_STMMAC@-ethercat.o \ + stmmac_xdp-@KERNEL_STMMAC@-ethercat.o \ + $(ec_stmmac-y) + +ec_stmmac-$(CONFIG_STMMAC_SELFTESTS) = \ + stmmac_selftests-@KERNEL_STMMAC@-ethercat.o + +ifeq (@ENABLE_DWMACINTEL@,1) + obj-m += ec_dwmac-intel.o + ec_dwmac-intel-objs := \ + $(EC_STMMAC_OBJS) \ + dwmac-intel-@KERNEL_STMMAC@-ethercat.o +endif + +ifeq (@ENABLE_STMMACPCI@,1) + obj-m += ec_stmmac-pci.o + ec_stmmac-pci-objs := \ + $(EC_STMMAC_OBJS) \ + stmmac_pci-@KERNEL_STMMAC@-ethercat.o +endif +endif + +KBUILD_EXTRA_SYMBOLS := \ + @abs_top_builddir@/$(LINUX_SYMVERS) \ + @abs_top_builddir@/master/$(LINUX_SYMVERS) + +#----------------------------------------------------------------------------- diff --git a/devices/stmmac/Makefile.am b/devices/stmmac/Makefile.am new file mode 100644 index 00000000..00add3a3 --- /dev/null +++ b/devices/stmmac/Makefile.am @@ -0,0 +1,120 @@ +#----------------------------------------------------------------------------- +# +# Copyright (C) 2006-2021 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. +# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +#----------------------------------------------------------------------------- + +include $(top_srcdir)/Makefile.kbuild + +EXTRA_DIST = \ + chain_mode-6.1-ethercat.c \ + chain_mode-6.1-orig.c \ + common-6.1-ethercat.h \ + common-6.1-orig.h \ + descs-6.1-ethercat.h \ + descs-6.1-orig.h \ + descs_com-6.1-ethercat.h \ + descs_com-6.1-orig.h \ + dwmac1000-6.1-ethercat.h \ + dwmac1000-6.1-orig.h \ + dwmac1000_core-6.1-ethercat.c \ + dwmac1000_core-6.1-orig.c \ + dwmac1000_dma-6.1-ethercat.c \ + dwmac1000_dma-6.1-orig.c \ + dwmac100-6.1-ethercat.h \ + dwmac100-6.1-orig.h \ + dwmac100_core-6.1-ethercat.c \ + dwmac100_core-6.1-orig.c \ + dwmac100_dma-6.1-ethercat.c \ + dwmac100_dma-6.1-orig.c \ + dwmac4-6.1-ethercat.h \ + dwmac4-6.1-orig.h \ + dwmac4_core-6.1-ethercat.c \ + dwmac4_core-6.1-orig.c \ + dwmac4_descs-6.1-ethercat.c \ + dwmac4_descs-6.1-ethercat.h \ + dwmac4_descs-6.1-orig.c \ + dwmac4_descs-6.1-orig.h \ + dwmac4_dma-6.1-ethercat.c \ + dwmac4_dma-6.1-ethercat.h \ + dwmac4_dma-6.1-orig.c \ + dwmac4_dma-6.1-orig.h \ + dwmac4_lib-6.1-ethercat.c \ + dwmac4_lib-6.1-orig.c \ + dwmac5-6.1-ethercat.c \ + dwmac5-6.1-ethercat.h \ + dwmac5-6.1-orig.c \ + dwmac5-6.1-orig.h \ + dwmac_dma-6.1-ethercat.h \ + dwmac_dma-6.1-orig.h \ + dwmac-intel-6.1-ethercat.c \ + dwmac-intel-6.1-ethercat.h \ + dwmac-intel-6.1-orig.c \ + dwmac-intel-6.1-orig.h \ + dwmac_lib-6.1-ethercat.c \ + dwmac_lib-6.1-orig.c \ + dwxgmac2-6.1-ethercat.h \ + dwxgmac2-6.1-orig.h \ + dwxgmac2_core-6.1-ethercat.c \ + dwxgmac2_core-6.1-orig.c \ + dwxgmac2_descs-6.1-ethercat.c \ + dwxgmac2_descs-6.1-orig.c \ + dwxgmac2_dma-6.1-ethercat.c \ + dwxgmac2_dma-6.1-orig.c \ + dwxlgmac2-6.1-ethercat.h \ + dwxlgmac2-6.1-orig.h \ + enh_desc-6.1-ethercat.c \ + enh_desc-6.1-orig.c \ + hwif-6.1-ethercat.c \ + hwif-6.1-ethercat.h \ + hwif-6.1-orig.c \ + hwif-6.1-orig.h \ + mmc-6.1-ethercat.h \ + mmc-6.1-orig.h \ + mmc_core-6.1-ethercat.c \ + mmc_core-6.1-orig.c \ + norm_desc-6.1-ethercat.c \ + norm_desc-6.1-orig.c \ + ring_mode-6.1-ethercat.c \ + ring_mode-6.1-orig.c \ + stmmac-6.1-ethercat.h \ + stmmac-6.1-orig.h \ + stmmac_ethtool-6.1-ethercat.c \ + stmmac_ethtool-6.1-orig.c \ + stmmac_hwtstamp-6.1-ethercat.c \ + stmmac_hwtstamp-6.1-orig.c \ + stmmac_main-6.1-ethercat.c \ + stmmac_main-6.1-orig.c \ + stmmac_mdio-6.1-ethercat.c \ + stmmac_mdio-6.1-orig.c \ + stmmac_pci-6.1-ethercat.c \ + stmmac_pci-6.1-orig.c \ + stmmac_pcs-6.1-ethercat.h \ + stmmac_pcs-6.1-orig.h \ + stmmac_ptp-6.1-ethercat.c \ + stmmac_ptp-6.1-ethercat.h \ + stmmac_ptp-6.1-orig.c \ + stmmac_ptp-6.1-orig.h \ + stmmac_tc-6.1-ethercat.c \ + stmmac_tc-6.1-orig.c \ + stmmac_xdp-6.1-ethercat.c \ + stmmac_xdp-6.1-ethercat.h \ + stmmac_xdp-6.1-orig.c \ + stmmac_xdp-6.1-orig.h + +#----------------------------------------------------------------------------- diff --git a/devices/stmmac/chain_mode-6.1-ethercat.c b/devices/stmmac/chain_mode-6.1-ethercat.c new file mode 100644 index 00000000..f2a5a365 --- /dev/null +++ b/devices/stmmac/chain_mode-6.1-ethercat.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Specialised functions for managing Chained mode + + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + 
descriptors in case of the DMA is configured to work in chained or + in ring mode. + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include "stmmac-6.1-ethercat.h" + +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; + unsigned int nopaged_len = skb_headlen(skb); + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->cur_tx; + unsigned int bmax, des2; + unsigned int i = 1, len; + struct dma_desc *desc; + + desc = tx_q->dma_tx + entry; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; + else + bmax = BUF_SIZE_2KiB; + + len = nopaged_len - bmax; + + des2 = dma_map_single(priv->device, skb->data, + bmax, DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; + /* do not close the descriptor and do not set own bit */ + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE, + 0, false, skb->len); + + while (len != 0) { + tx_q->tx_skbuff[entry] = NULL; + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + desc = tx_q->dma_tx + entry; + + if (len > bmax) { + des2 = dma_map_single(priv->device, + (skb->data + bmax * i), + bmax, DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; + stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum, + STMMAC_CHAIN_MODE, 1, false, skb->len); + len -= bmax; + i++; + } else { + des2 = dma_map_single(priv->device, + (skb->data + bmax * i), len, + DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = len; + /* last descriptor can be set now */ + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_CHAIN_MODE, 1, true, skb->len); + len = 0; + } + } + + tx_q->cur_tx = entry; + + return entry; +} + +static unsigned int is_jumbo_frm(int len, int enh_desc) +{ + unsigned int ret = 0; + + if ((enh_desc && (len > BUF_SIZE_8KiB)) || + (!enh_desc && (len > BUF_SIZE_2KiB))) { + ret = 1; + } + + return ret; +} + +static void init_dma_chain(void *des, dma_addr_t phy_addr, + unsigned int size, unsigned int extend_desc) +{ + /* + * In chained mode the des3 points to the next element in the ring. + * The latest element has to point to the head. 
+ */ + int i; + dma_addr_t dma_phy = phy_addr; + + if (extend_desc) { + struct dma_extended_desc *p = (struct dma_extended_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_extended_desc); + p->basic.des3 = cpu_to_le32((unsigned int)dma_phy); + p++; + } + p->basic.des3 = cpu_to_le32((unsigned int)phy_addr); + + } else { + struct dma_desc *p = (struct dma_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_desc); + p->des3 = cpu_to_le32((unsigned int)dma_phy); + p++; + } + p->des3 = cpu_to_le32((unsigned int)phy_addr); + } +} + +static void refill_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; + struct stmmac_priv *priv = rx_q->priv_data; + + if (priv->hwts_rx_en && !priv->extend_desc) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + + (((rx_q->dirty_rx) + 1) % + priv->dma_conf.dma_rx_size) * + sizeof(struct dma_desc))); +} + +static void clean_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->dirty_tx; + + if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && + priv->hwts_tx_en) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + + ((tx_q->dirty_tx + 1) % + priv->dma_conf.dma_tx_size)) + * sizeof(struct dma_desc))); +} + +const struct stmmac_mode_ops chain_mode_ops = { + .init = init_dma_chain, + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .clean_desc3 = clean_desc3, +}; diff --git a/devices/stmmac/chain_mode-6.1-orig.c b/devices/stmmac/chain_mode-6.1-orig.c new file mode 100644 index 00000000..2e8744ac --- /dev/null +++ b/devices/stmmac/chain_mode-6.1-orig.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Specialised functions for managing Chained mode + + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + descriptors in case of the DMA is configured to work in chained or + in ring mode. 
+ + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include "stmmac.h" + +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; + unsigned int nopaged_len = skb_headlen(skb); + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->cur_tx; + unsigned int bmax, des2; + unsigned int i = 1, len; + struct dma_desc *desc; + + desc = tx_q->dma_tx + entry; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; + else + bmax = BUF_SIZE_2KiB; + + len = nopaged_len - bmax; + + des2 = dma_map_single(priv->device, skb->data, + bmax, DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; + /* do not close the descriptor and do not set own bit */ + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE, + 0, false, skb->len); + + while (len != 0) { + tx_q->tx_skbuff[entry] = NULL; + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + desc = tx_q->dma_tx + entry; + + if (len > bmax) { + des2 = dma_map_single(priv->device, + (skb->data + bmax * i), + bmax, DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; + stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum, + STMMAC_CHAIN_MODE, 1, false, skb->len); + len -= bmax; + i++; + } else { + des2 = dma_map_single(priv->device, + (skb->data + bmax * i), len, + DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = len; + /* last descriptor can be set now */ + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_CHAIN_MODE, 1, true, skb->len); + len = 0; + } + } + + tx_q->cur_tx = entry; + + return entry; +} + +static unsigned int is_jumbo_frm(int len, int enh_desc) +{ + unsigned int ret = 0; + + if ((enh_desc && (len > BUF_SIZE_8KiB)) || + (!enh_desc && (len > BUF_SIZE_2KiB))) { + ret = 1; + } + + return ret; +} + +static void init_dma_chain(void *des, dma_addr_t phy_addr, + unsigned int size, unsigned int extend_desc) +{ + /* + * In chained mode the des3 points to the next element in the ring. + * The latest element has to point to the head. + */ + int i; + dma_addr_t dma_phy = phy_addr; + + if (extend_desc) { + struct dma_extended_desc *p = (struct dma_extended_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_extended_desc); + p->basic.des3 = cpu_to_le32((unsigned int)dma_phy); + p++; + } + p->basic.des3 = cpu_to_le32((unsigned int)phy_addr); + + } else { + struct dma_desc *p = (struct dma_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_desc); + p->des3 = cpu_to_le32((unsigned int)dma_phy); + p++; + } + p->des3 = cpu_to_le32((unsigned int)phy_addr); + } +} + +static void refill_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; + struct stmmac_priv *priv = rx_q->priv_data; + + if (priv->hwts_rx_en && !priv->extend_desc) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. 
+ */ + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + + (((rx_q->dirty_rx) + 1) % + priv->dma_conf.dma_rx_size) * + sizeof(struct dma_desc))); +} + +static void clean_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->dirty_tx; + + if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && + priv->hwts_tx_en) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + + ((tx_q->dirty_tx + 1) % + priv->dma_conf.dma_tx_size)) + * sizeof(struct dma_desc))); +} + +const struct stmmac_mode_ops chain_mode_ops = { + .init = init_dma_chain, + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .clean_desc3 = clean_desc3, +}; diff --git a/devices/stmmac/common-6.1-ethercat.h b/devices/stmmac/common-6.1-ethercat.h new file mode 100644 index 00000000..9ae013a2 --- /dev/null +++ b/devices/stmmac/common-6.1-ethercat.h @@ -0,0 +1,568 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + STMMAC Common Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __COMMON_H__ +#define __COMMON_H__ + +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_VLAN_8021Q) +#define STMMAC_VLAN_TAG_USED +#include +#endif + +#include "descs-6.1-ethercat.h" +#include "hwif-6.1-ethercat.h" +#include "mmc-6.1-ethercat.h" + +/* Synopsys Core versions */ +#define DWMAC_CORE_3_40 0x34 +#define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_4_00 0x40 +#define DWMAC_CORE_4_10 0x41 +#define DWMAC_CORE_5_00 0x50 +#define DWMAC_CORE_5_10 0x51 +#define DWMAC_CORE_5_20 0x52 +#define DWXGMAC_CORE_2_10 0x21 +#define DWXLGMAC_CORE_2_00 0x20 + +/* Device ID */ +#define DWXGMAC_ID 0x76 +#define DWXLGMAC_ID 0x27 + +#define STMMAC_CHAN0 0 /* Always supported and default for all chips */ + +/* TX and RX Descriptor Length, these need to be power of two. + * TX descriptor length less than 64 may cause transmit queue timed out error. + * RX descriptor length less than 64 may cause inconsistent Rx chain error. 
+ */ +#define DMA_MIN_TX_SIZE 64 +#define DMA_MAX_TX_SIZE 1024 +#define DMA_DEFAULT_TX_SIZE 512 +#define DMA_MIN_RX_SIZE 64 +#define DMA_MAX_RX_SIZE 1024 +#define DMA_DEFAULT_RX_SIZE 512 +#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) + +#undef FRAME_FILTER_DEBUG +/* #define FRAME_FILTER_DEBUG */ + +struct stmmac_txq_stats { + unsigned long tx_pkt_n; + unsigned long tx_normal_irq_n; +}; + +struct stmmac_rxq_stats { + unsigned long rx_pkt_n; + unsigned long rx_normal_irq_n; +}; + +/* Extra statistic and debug information exposed by ethtool */ +struct stmmac_extra_stats { + /* Transmit errors */ + unsigned long tx_underflow ____cacheline_aligned; + unsigned long tx_carrier; + unsigned long tx_losscarrier; + unsigned long vlan_tag; + unsigned long tx_deferred; + unsigned long tx_vlan; + unsigned long tx_jabber; + unsigned long tx_frame_flushed; + unsigned long tx_payload_error; + unsigned long tx_ip_header_error; + /* Receive errors */ + unsigned long rx_desc; + unsigned long sa_filter_fail; + unsigned long overflow_error; + unsigned long ipc_csum_error; + unsigned long rx_collision; + unsigned long rx_crc_errors; + unsigned long dribbling_bit; + unsigned long rx_length; + unsigned long rx_mii; + unsigned long rx_multicast; + unsigned long rx_gmac_overflow; + unsigned long rx_watchdog; + unsigned long da_rx_filter_fail; + unsigned long sa_rx_filter_fail; + unsigned long rx_missed_cntr; + unsigned long rx_overflow_cntr; + unsigned long rx_vlan; + unsigned long rx_split_hdr_pkt_n; + /* Tx/Rx IRQ error info */ + unsigned long tx_undeflow_irq; + unsigned long tx_process_stopped_irq; + unsigned long tx_jabber_irq; + unsigned long rx_overflow_irq; + unsigned long rx_buf_unav_irq; + unsigned long rx_process_stopped_irq; + unsigned long rx_watchdog_irq; + unsigned long tx_early_irq; + unsigned long fatal_bus_error_irq; + /* Tx/Rx IRQ Events */ + unsigned long rx_early_irq; + unsigned long threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long normal_irq_n; + unsigned long rx_normal_irq_n; + unsigned long napi_poll; + unsigned long tx_normal_irq_n; + unsigned long tx_clean; + unsigned long tx_set_ic_bit; + unsigned long irq_receive_pmt_irq_n; + /* MMC info */ + unsigned long mmc_tx_irq_n; + unsigned long mmc_rx_irq_n; + unsigned long mmc_rx_csum_offload_irq_n; + /* EEE */ + unsigned long irq_tx_path_in_lpi_mode_n; + unsigned long irq_tx_path_exit_lpi_mode_n; + unsigned long irq_rx_path_in_lpi_mode_n; + unsigned long irq_rx_path_exit_lpi_mode_n; + unsigned long phy_eee_wakeup_error_n; + /* Extended RDES status */ + unsigned long ip_hdr_err; + unsigned long ip_payload_err; + unsigned long ip_csum_bypassed; + unsigned long ipv4_pkt_rcvd; + unsigned long ipv6_pkt_rcvd; + unsigned long no_ptp_rx_msg_type_ext; + unsigned long ptp_rx_msg_type_sync; + unsigned long ptp_rx_msg_type_follow_up; + unsigned long ptp_rx_msg_type_delay_req; + unsigned long ptp_rx_msg_type_delay_resp; + unsigned long ptp_rx_msg_type_pdelay_req; + unsigned long ptp_rx_msg_type_pdelay_resp; + unsigned long ptp_rx_msg_type_pdelay_follow_up; + unsigned long ptp_rx_msg_type_announce; + unsigned long ptp_rx_msg_type_management; + unsigned long ptp_rx_msg_pkt_reserved_type; + unsigned long ptp_frame_type; + unsigned long ptp_ver; + unsigned long timestamp_dropped; + unsigned long av_pkt_rcvd; + unsigned long av_tagged_pkt_rcvd; + unsigned long vlan_tag_priority_val; + unsigned long l3_filter_match; + unsigned long l4_filter_match; + unsigned long l3_l4_filter_no_match; + /* PCS */ + unsigned long 
irq_pcs_ane_n; + unsigned long irq_pcs_link_n; + unsigned long irq_rgmii_n; + unsigned long pcs_link; + unsigned long pcs_duplex; + unsigned long pcs_speed; + /* debug register */ + unsigned long mtl_tx_status_fifo_full; + unsigned long mtl_tx_fifo_not_empty; + unsigned long mmtl_fifo_ctrl; + unsigned long mtl_tx_fifo_read_ctrl_write; + unsigned long mtl_tx_fifo_read_ctrl_wait; + unsigned long mtl_tx_fifo_read_ctrl_read; + unsigned long mtl_tx_fifo_read_ctrl_idle; + unsigned long mac_tx_in_pause; + unsigned long mac_tx_frame_ctrl_xfer; + unsigned long mac_tx_frame_ctrl_idle; + unsigned long mac_tx_frame_ctrl_wait; + unsigned long mac_tx_frame_ctrl_pause; + unsigned long mac_gmii_tx_proto_engine; + unsigned long mtl_rx_fifo_fill_level_full; + unsigned long mtl_rx_fifo_fill_above_thresh; + unsigned long mtl_rx_fifo_fill_below_thresh; + unsigned long mtl_rx_fifo_fill_level_empty; + unsigned long mtl_rx_fifo_read_ctrl_flush; + unsigned long mtl_rx_fifo_read_ctrl_read_data; + unsigned long mtl_rx_fifo_read_ctrl_status; + unsigned long mtl_rx_fifo_read_ctrl_idle; + unsigned long mtl_rx_fifo_ctrl_active; + unsigned long mac_rx_frame_ctrl_fifo; + unsigned long mac_gmii_rx_proto_engine; + /* TSO */ + unsigned long tx_tso_frames; + unsigned long tx_tso_nfrags; + /* EST */ + unsigned long mtl_est_cgce; + unsigned long mtl_est_hlbs; + unsigned long mtl_est_hlbf; + unsigned long mtl_est_btre; + unsigned long mtl_est_btrlm; + /* per queue statistics */ + struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; + struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; +}; + +/* Safety Feature statistics exposed by ethtool */ +struct stmmac_safety_stats { + unsigned long mac_errors[32]; + unsigned long mtl_errors[32]; + unsigned long dma_errors[32]; + unsigned long dma_dpp_errors[32]; +}; + +/* Number of fields in Safety Stats */ +#define STMMAC_SAFETY_FEAT_SIZE \ + (sizeof(struct stmmac_safety_stats) / sizeof(unsigned long)) + +/* CSR Frequency Access Defines*/ +#define CSR_F_35M 35000000 +#define CSR_F_60M 60000000 +#define CSR_F_100M 100000000 +#define CSR_F_150M 150000000 +#define CSR_F_250M 250000000 +#define CSR_F_300M 300000000 + +#define MAC_CSR_H_FRQ_MASK 0x20 + +#define HASH_TABLE_SIZE 64 +#define PAUSE_TIME 0xffff + +/* Flow Control defines */ +#define FLOW_OFF 0 +#define FLOW_RX 1 +#define FLOW_TX 2 +#define FLOW_AUTO (FLOW_TX | FLOW_RX) + +/* PCS defines */ +#define STMMAC_PCS_RGMII (1 << 0) +#define STMMAC_PCS_SGMII (1 << 1) +#define STMMAC_PCS_TBI (1 << 2) +#define STMMAC_PCS_RTBI (1 << 3) + +#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ + +/* DAM HW feature register fields */ +#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */ +#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */ +#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */ +#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */ +#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */ +#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */ +#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */ +#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */ +#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */ +#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */ +#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */ +#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */ +#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */ +#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */ +#define 
DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */ +#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */ +#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */ +#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */ +#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */ +#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */ +#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */ +#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. additional Tx Channels */ +#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */ +/* Timestamping with Internal System Time */ +#define DMA_HW_FEAT_INTTSEN 0x02000000 +#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ +#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */ +#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ +#define DEFAULT_DMA_PBL 8 + +/* MSI defines */ +#define STMMAC_MSI_VEC_MAX 32 + +/* PCS status and mask defines */ +#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */ +#define PCS_LINK_IRQ BIT(1) /* PCS Link */ +#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */ + +/* Max/Min RI Watchdog Timer count value */ +#define MAX_DMA_RIWT 0xff +#define MIN_DMA_RIWT 0x10 +#define DEF_DMA_RIWT 0xa0 +/* Tx coalesce parameters */ +#define STMMAC_COAL_TX_TIMER 1000 +#define STMMAC_MAX_COAL_TX_TICK 100000 +#define STMMAC_TX_MAX_FRAMES 256 +#define STMMAC_TX_FRAMES 25 +#define STMMAC_RX_FRAMES 0 + +/* Packets types */ +enum packets_types { + PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */ + PACKET_PTPQ = 0x2, /* PTP Packets */ + PACKET_DCBCPQ = 0x3, /* DCB Control Packets */ + PACKET_UPQ = 0x4, /* Untagged Packets */ + PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */ +}; + +/* Rx IPC status */ +enum rx_frame_status { + good_frame = 0x0, + discard_frame = 0x1, + csum_none = 0x2, + llc_snap = 0x4, + dma_own = 0x8, + rx_not_ls = 0x10, +}; + +/* Tx status */ +enum tx_frame_status { + tx_done = 0x0, + tx_not_ls = 0x1, + tx_err = 0x2, + tx_dma_own = 0x4, + tx_err_bump_tc = 0x8, +}; + +enum dma_irq_status { + tx_hard_error = 0x1, + tx_hard_error_bump_tc = 0x2, + handle_rx = 0x4, + handle_tx = 0x8, +}; + +enum dma_irq_dir { + DMA_DIR_RX = 0x1, + DMA_DIR_TX = 0x2, + DMA_DIR_RXTX = 0x3, +}; + +enum request_irq_err { + REQ_IRQ_ERR_ALL, + REQ_IRQ_ERR_TX, + REQ_IRQ_ERR_RX, + REQ_IRQ_ERR_SFTY_UE, + REQ_IRQ_ERR_SFTY_CE, + REQ_IRQ_ERR_LPI, + REQ_IRQ_ERR_WOL, + REQ_IRQ_ERR_MAC, + REQ_IRQ_ERR_NO, +}; + +/* EEE and LPI defines */ +#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) +#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) +#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) +#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) + +/* FPE defines */ +#define FPE_EVENT_UNKNOWN 0 +#define FPE_EVENT_TRSP BIT(0) +#define FPE_EVENT_TVER BIT(1) +#define FPE_EVENT_RRSP BIT(2) +#define FPE_EVENT_RVER BIT(3) + +#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) + +/* Physical Coding Sublayer */ +struct rgmii_adv { + unsigned int pause; + unsigned int duplex; + unsigned int lp_pause; + unsigned int lp_duplex; +}; + +#define STMMAC_PCS_PAUSE 1 +#define STMMAC_PCS_ASYM_PAUSE 2 + +/* DMA HW capabilities */ +struct dma_features { + unsigned int mbps_10_100; + unsigned int mbps_1000; + unsigned int half_duplex; + unsigned int hash_filter; + unsigned int multi_addr; + unsigned int pcs; + unsigned int sma_mdio; + unsigned int pmt_remote_wake_up; + unsigned int pmt_magic_frame; + unsigned int rmon; + /* IEEE 1588-2002 */ + unsigned int 
time_stamp; + /* IEEE 1588-2008 */ + unsigned int atime_stamp; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + unsigned int eee; + unsigned int av; + unsigned int hash_tb_sz; + unsigned int tsoen; + /* TX and RX csum */ + unsigned int tx_coe; + unsigned int rx_coe; + unsigned int rx_coe_type1; + unsigned int rx_coe_type2; + unsigned int rxfifo_over_2048; + /* TX and RX number of channels */ + unsigned int number_rx_channel; + unsigned int number_tx_channel; + /* TX and RX number of queues */ + unsigned int number_rx_queues; + unsigned int number_tx_queues; + /* PPS output */ + unsigned int pps_out_num; + /* Alternate (enhanced) DESC mode */ + unsigned int enh_desc; + /* TX and RX FIFO sizes */ + unsigned int tx_fifo_size; + unsigned int rx_fifo_size; + /* Automotive Safety Package */ + unsigned int asp; + /* RX Parser */ + unsigned int frpsel; + unsigned int frpbs; + unsigned int frpes; + unsigned int addr64; + unsigned int host_dma_width; + unsigned int rssen; + unsigned int vlhash; + unsigned int sphen; + unsigned int vlins; + unsigned int dvlan; + unsigned int l3l4fnum; + unsigned int arpoffsel; + /* TSN Features */ + unsigned int estwid; + unsigned int estdep; + unsigned int estsel; + unsigned int fpesel; + unsigned int tbssel; + /* Numbers of Auxiliary Snapshot Inputs */ + unsigned int aux_snapshot_n; +}; + +/* RX Buffer size must be multiple of 4/8/16 bytes */ +#define BUF_SIZE_16KiB 16368 +#define BUF_SIZE_8KiB 8188 +#define BUF_SIZE_4KiB 4096 +#define BUF_SIZE_2KiB 2048 + +/* Power Down and WOL */ +#define PMT_NOT_SUPPORTED 0 +#define PMT_SUPPORTED 1 + +/* Common MAC defines */ +#define MAC_CTRL_REG 0x00000000 /* MAC Control */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ + +/* Default LPI timers */ +#define STMMAC_DEFAULT_LIT_LS 0x3E8 +#define STMMAC_DEFAULT_TWT_LS 0x1E +#define STMMAC_ET_MAX 0xFFFFF + +#define STMMAC_CHAIN_MODE 0x1 +#define STMMAC_RING_MODE 0x2 + +#define JUMBO_LEN 9000 + +/* Receive Side Scaling */ +#define STMMAC_RSS_HASH_KEY_SIZE 40 +#define STMMAC_RSS_MAX_TABLE_SIZE 256 + +/* VLAN */ +#define STMMAC_VLAN_NONE 0x0 +#define STMMAC_VLAN_REMOVE 0x1 +#define STMMAC_VLAN_INSERT 0x2 +#define STMMAC_VLAN_REPLACE 0x3 + +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; + +struct mac_device_info; + +extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; + +struct mac_link { + u32 speed_mask; + u32 speed10; + u32 speed100; + u32 speed1000; + u32 speed2500; + u32 duplex; + struct { + u32 speed2500; + u32 speed5000; + u32 speed10000; + } xgmii; + struct { + u32 speed25000; + u32 speed40000; + u32 speed50000; + u32 speed100000; + } xlgmii; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ + unsigned int addr_shift; /* MII address shift */ + unsigned int reg_shift; /* MII reg shift */ + unsigned int addr_mask; /* MII address mask */ + unsigned int reg_mask; /* MII reg mask */ + unsigned int clk_csr_shift; + unsigned int clk_csr_mask; +}; + +struct mac_device_info { + const struct stmmac_ops *mac; + const struct stmmac_desc_ops *desc; + const struct stmmac_dma_ops *dma; + const struct stmmac_mode_ops *mode; + const struct stmmac_hwtimestamp *ptp; + const struct stmmac_tc_ops *tc; + const struct stmmac_mmc_ops *mmc; + struct dw_xpcs *xpcs; + struct mii_regs mii; /* MII register Addresses */ + struct mac_link link; + void __iomem *pcsr; /* vpointer to 
device CSRs */ + unsigned int multicast_filter_bins; + unsigned int unicast_filter_entries; + unsigned int mcast_bits_log2; + unsigned int rx_csum; + unsigned int pcs; + unsigned int pmt; + unsigned int ps; + unsigned int xlgmac; + unsigned int num_vlan; + u32 vlan_filter[32]; + bool vlan_fail_q_en; + u8 vlan_fail_q; +}; + +struct stmmac_rx_routing { + u32 reg_mask; + u32 reg_shift; +}; + +int dwmac100_setup(struct stmmac_priv *priv); +int dwmac1000_setup(struct stmmac_priv *priv); +int dwmac4_setup(struct stmmac_priv *priv); +int dwxgmac2_setup(struct stmmac_priv *priv); +int dwxlgmac2_setup(struct stmmac_priv *priv); + +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); +void stmmac_set_mac(void __iomem *ioaddr, bool enable); + +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); +void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); + +void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); + +extern const struct stmmac_mode_ops ring_mode_ops; +extern const struct stmmac_mode_ops chain_mode_ops; +extern const struct stmmac_desc_ops dwmac4_desc_ops; + +#endif /* __COMMON_H__ */ diff --git a/devices/stmmac/common-6.1-orig.h b/devices/stmmac/common-6.1-orig.h new file mode 100644 index 00000000..c11d6268 --- /dev/null +++ b/devices/stmmac/common-6.1-orig.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + STMMAC Common Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __COMMON_H__ +#define __COMMON_H__ + +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_VLAN_8021Q) +#define STMMAC_VLAN_TAG_USED +#include +#endif + +#include "descs.h" +#include "hwif.h" +#include "mmc.h" + +/* Synopsys Core versions */ +#define DWMAC_CORE_3_40 0x34 +#define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_4_00 0x40 +#define DWMAC_CORE_4_10 0x41 +#define DWMAC_CORE_5_00 0x50 +#define DWMAC_CORE_5_10 0x51 +#define DWMAC_CORE_5_20 0x52 +#define DWXGMAC_CORE_2_10 0x21 +#define DWXLGMAC_CORE_2_00 0x20 + +/* Device ID */ +#define DWXGMAC_ID 0x76 +#define DWXLGMAC_ID 0x27 + +#define STMMAC_CHAN0 0 /* Always supported and default for all chips */ + +/* TX and RX Descriptor Length, these need to be power of two. + * TX descriptor length less than 64 may cause transmit queue timed out error. + * RX descriptor length less than 64 may cause inconsistent Rx chain error. 
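The power-of-two constraint stated in this comment exists because the STMMAC_GET_ENTRY() macro defined just below advances a ring index by masking rather than by a modulo, so the index wraps back to zero when it reaches the ring size. A minimal stand-alone sketch of that wrap (user-space build assumed; the macro is copied verbatim from the header, while main() and its printed values are illustrative only and not part of the driver):

#include <stdio.h>

#define STMMAC_GET_ENTRY(x, size)	((x + 1) & (size - 1))

int main(void)
{
	const unsigned int size = 512;	/* DMA_DEFAULT_TX_SIZE */
	unsigned int entry = 510;
	int i;

	/* Advance past the end of the ring to show the wrap to index 0. */
	for (i = 0; i < 3; i++) {
		printf("entry %3u -> next %3u\n", entry,
		       STMMAC_GET_ENTRY(entry, size));
		entry = STMMAC_GET_ENTRY(entry, size);
	}
	/* Prints 510 -> 511, 511 -> 0, 0 -> 1. With a non-power-of-two
	 * size the mask would yield wrong indices, hence the constraint. */
	return 0;
}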
+ */ +#define DMA_MIN_TX_SIZE 64 +#define DMA_MAX_TX_SIZE 1024 +#define DMA_DEFAULT_TX_SIZE 512 +#define DMA_MIN_RX_SIZE 64 +#define DMA_MAX_RX_SIZE 1024 +#define DMA_DEFAULT_RX_SIZE 512 +#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) + +#undef FRAME_FILTER_DEBUG +/* #define FRAME_FILTER_DEBUG */ + +struct stmmac_txq_stats { + unsigned long tx_pkt_n; + unsigned long tx_normal_irq_n; +}; + +struct stmmac_rxq_stats { + unsigned long rx_pkt_n; + unsigned long rx_normal_irq_n; +}; + +/* Extra statistic and debug information exposed by ethtool */ +struct stmmac_extra_stats { + /* Transmit errors */ + unsigned long tx_underflow ____cacheline_aligned; + unsigned long tx_carrier; + unsigned long tx_losscarrier; + unsigned long vlan_tag; + unsigned long tx_deferred; + unsigned long tx_vlan; + unsigned long tx_jabber; + unsigned long tx_frame_flushed; + unsigned long tx_payload_error; + unsigned long tx_ip_header_error; + /* Receive errors */ + unsigned long rx_desc; + unsigned long sa_filter_fail; + unsigned long overflow_error; + unsigned long ipc_csum_error; + unsigned long rx_collision; + unsigned long rx_crc_errors; + unsigned long dribbling_bit; + unsigned long rx_length; + unsigned long rx_mii; + unsigned long rx_multicast; + unsigned long rx_gmac_overflow; + unsigned long rx_watchdog; + unsigned long da_rx_filter_fail; + unsigned long sa_rx_filter_fail; + unsigned long rx_missed_cntr; + unsigned long rx_overflow_cntr; + unsigned long rx_vlan; + unsigned long rx_split_hdr_pkt_n; + /* Tx/Rx IRQ error info */ + unsigned long tx_undeflow_irq; + unsigned long tx_process_stopped_irq; + unsigned long tx_jabber_irq; + unsigned long rx_overflow_irq; + unsigned long rx_buf_unav_irq; + unsigned long rx_process_stopped_irq; + unsigned long rx_watchdog_irq; + unsigned long tx_early_irq; + unsigned long fatal_bus_error_irq; + /* Tx/Rx IRQ Events */ + unsigned long rx_early_irq; + unsigned long threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long normal_irq_n; + unsigned long rx_normal_irq_n; + unsigned long napi_poll; + unsigned long tx_normal_irq_n; + unsigned long tx_clean; + unsigned long tx_set_ic_bit; + unsigned long irq_receive_pmt_irq_n; + /* MMC info */ + unsigned long mmc_tx_irq_n; + unsigned long mmc_rx_irq_n; + unsigned long mmc_rx_csum_offload_irq_n; + /* EEE */ + unsigned long irq_tx_path_in_lpi_mode_n; + unsigned long irq_tx_path_exit_lpi_mode_n; + unsigned long irq_rx_path_in_lpi_mode_n; + unsigned long irq_rx_path_exit_lpi_mode_n; + unsigned long phy_eee_wakeup_error_n; + /* Extended RDES status */ + unsigned long ip_hdr_err; + unsigned long ip_payload_err; + unsigned long ip_csum_bypassed; + unsigned long ipv4_pkt_rcvd; + unsigned long ipv6_pkt_rcvd; + unsigned long no_ptp_rx_msg_type_ext; + unsigned long ptp_rx_msg_type_sync; + unsigned long ptp_rx_msg_type_follow_up; + unsigned long ptp_rx_msg_type_delay_req; + unsigned long ptp_rx_msg_type_delay_resp; + unsigned long ptp_rx_msg_type_pdelay_req; + unsigned long ptp_rx_msg_type_pdelay_resp; + unsigned long ptp_rx_msg_type_pdelay_follow_up; + unsigned long ptp_rx_msg_type_announce; + unsigned long ptp_rx_msg_type_management; + unsigned long ptp_rx_msg_pkt_reserved_type; + unsigned long ptp_frame_type; + unsigned long ptp_ver; + unsigned long timestamp_dropped; + unsigned long av_pkt_rcvd; + unsigned long av_tagged_pkt_rcvd; + unsigned long vlan_tag_priority_val; + unsigned long l3_filter_match; + unsigned long l4_filter_match; + unsigned long l3_l4_filter_no_match; + /* PCS */ + unsigned long 
irq_pcs_ane_n; + unsigned long irq_pcs_link_n; + unsigned long irq_rgmii_n; + unsigned long pcs_link; + unsigned long pcs_duplex; + unsigned long pcs_speed; + /* debug register */ + unsigned long mtl_tx_status_fifo_full; + unsigned long mtl_tx_fifo_not_empty; + unsigned long mmtl_fifo_ctrl; + unsigned long mtl_tx_fifo_read_ctrl_write; + unsigned long mtl_tx_fifo_read_ctrl_wait; + unsigned long mtl_tx_fifo_read_ctrl_read; + unsigned long mtl_tx_fifo_read_ctrl_idle; + unsigned long mac_tx_in_pause; + unsigned long mac_tx_frame_ctrl_xfer; + unsigned long mac_tx_frame_ctrl_idle; + unsigned long mac_tx_frame_ctrl_wait; + unsigned long mac_tx_frame_ctrl_pause; + unsigned long mac_gmii_tx_proto_engine; + unsigned long mtl_rx_fifo_fill_level_full; + unsigned long mtl_rx_fifo_fill_above_thresh; + unsigned long mtl_rx_fifo_fill_below_thresh; + unsigned long mtl_rx_fifo_fill_level_empty; + unsigned long mtl_rx_fifo_read_ctrl_flush; + unsigned long mtl_rx_fifo_read_ctrl_read_data; + unsigned long mtl_rx_fifo_read_ctrl_status; + unsigned long mtl_rx_fifo_read_ctrl_idle; + unsigned long mtl_rx_fifo_ctrl_active; + unsigned long mac_rx_frame_ctrl_fifo; + unsigned long mac_gmii_rx_proto_engine; + /* TSO */ + unsigned long tx_tso_frames; + unsigned long tx_tso_nfrags; + /* EST */ + unsigned long mtl_est_cgce; + unsigned long mtl_est_hlbs; + unsigned long mtl_est_hlbf; + unsigned long mtl_est_btre; + unsigned long mtl_est_btrlm; + /* per queue statistics */ + struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; + struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; +}; + +/* Safety Feature statistics exposed by ethtool */ +struct stmmac_safety_stats { + unsigned long mac_errors[32]; + unsigned long mtl_errors[32]; + unsigned long dma_errors[32]; + unsigned long dma_dpp_errors[32]; +}; + +/* Number of fields in Safety Stats */ +#define STMMAC_SAFETY_FEAT_SIZE \ + (sizeof(struct stmmac_safety_stats) / sizeof(unsigned long)) + +/* CSR Frequency Access Defines*/ +#define CSR_F_35M 35000000 +#define CSR_F_60M 60000000 +#define CSR_F_100M 100000000 +#define CSR_F_150M 150000000 +#define CSR_F_250M 250000000 +#define CSR_F_300M 300000000 + +#define MAC_CSR_H_FRQ_MASK 0x20 + +#define HASH_TABLE_SIZE 64 +#define PAUSE_TIME 0xffff + +/* Flow Control defines */ +#define FLOW_OFF 0 +#define FLOW_RX 1 +#define FLOW_TX 2 +#define FLOW_AUTO (FLOW_TX | FLOW_RX) + +/* PCS defines */ +#define STMMAC_PCS_RGMII (1 << 0) +#define STMMAC_PCS_SGMII (1 << 1) +#define STMMAC_PCS_TBI (1 << 2) +#define STMMAC_PCS_RTBI (1 << 3) + +#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ + +/* DAM HW feature register fields */ +#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */ +#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */ +#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */ +#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */ +#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */ +#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */ +#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */ +#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */ +#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */ +#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */ +#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */ +#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */ +#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */ +#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */ +#define 
DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */ +#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */ +#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */ +#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */ +#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */ +#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */ +#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */ +#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. additional Tx Channels */ +#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */ +/* Timestamping with Internal System Time */ +#define DMA_HW_FEAT_INTTSEN 0x02000000 +#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ +#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */ +#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ +#define DEFAULT_DMA_PBL 8 + +/* MSI defines */ +#define STMMAC_MSI_VEC_MAX 32 + +/* PCS status and mask defines */ +#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */ +#define PCS_LINK_IRQ BIT(1) /* PCS Link */ +#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */ + +/* Max/Min RI Watchdog Timer count value */ +#define MAX_DMA_RIWT 0xff +#define MIN_DMA_RIWT 0x10 +#define DEF_DMA_RIWT 0xa0 +/* Tx coalesce parameters */ +#define STMMAC_COAL_TX_TIMER 1000 +#define STMMAC_MAX_COAL_TX_TICK 100000 +#define STMMAC_TX_MAX_FRAMES 256 +#define STMMAC_TX_FRAMES 25 +#define STMMAC_RX_FRAMES 0 + +/* Packets types */ +enum packets_types { + PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */ + PACKET_PTPQ = 0x2, /* PTP Packets */ + PACKET_DCBCPQ = 0x3, /* DCB Control Packets */ + PACKET_UPQ = 0x4, /* Untagged Packets */ + PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */ +}; + +/* Rx IPC status */ +enum rx_frame_status { + good_frame = 0x0, + discard_frame = 0x1, + csum_none = 0x2, + llc_snap = 0x4, + dma_own = 0x8, + rx_not_ls = 0x10, +}; + +/* Tx status */ +enum tx_frame_status { + tx_done = 0x0, + tx_not_ls = 0x1, + tx_err = 0x2, + tx_dma_own = 0x4, + tx_err_bump_tc = 0x8, +}; + +enum dma_irq_status { + tx_hard_error = 0x1, + tx_hard_error_bump_tc = 0x2, + handle_rx = 0x4, + handle_tx = 0x8, +}; + +enum dma_irq_dir { + DMA_DIR_RX = 0x1, + DMA_DIR_TX = 0x2, + DMA_DIR_RXTX = 0x3, +}; + +enum request_irq_err { + REQ_IRQ_ERR_ALL, + REQ_IRQ_ERR_TX, + REQ_IRQ_ERR_RX, + REQ_IRQ_ERR_SFTY_UE, + REQ_IRQ_ERR_SFTY_CE, + REQ_IRQ_ERR_LPI, + REQ_IRQ_ERR_WOL, + REQ_IRQ_ERR_MAC, + REQ_IRQ_ERR_NO, +}; + +/* EEE and LPI defines */ +#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) +#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) +#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) +#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) + +/* FPE defines */ +#define FPE_EVENT_UNKNOWN 0 +#define FPE_EVENT_TRSP BIT(0) +#define FPE_EVENT_TVER BIT(1) +#define FPE_EVENT_RRSP BIT(2) +#define FPE_EVENT_RVER BIT(3) + +#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) + +/* Physical Coding Sublayer */ +struct rgmii_adv { + unsigned int pause; + unsigned int duplex; + unsigned int lp_pause; + unsigned int lp_duplex; +}; + +#define STMMAC_PCS_PAUSE 1 +#define STMMAC_PCS_ASYM_PAUSE 2 + +/* DMA HW capabilities */ +struct dma_features { + unsigned int mbps_10_100; + unsigned int mbps_1000; + unsigned int half_duplex; + unsigned int hash_filter; + unsigned int multi_addr; + unsigned int pcs; + unsigned int sma_mdio; + unsigned int pmt_remote_wake_up; + unsigned int pmt_magic_frame; + unsigned int rmon; + /* IEEE 1588-2002 */ + unsigned int 
time_stamp; + /* IEEE 1588-2008 */ + unsigned int atime_stamp; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + unsigned int eee; + unsigned int av; + unsigned int hash_tb_sz; + unsigned int tsoen; + /* TX and RX csum */ + unsigned int tx_coe; + unsigned int rx_coe; + unsigned int rx_coe_type1; + unsigned int rx_coe_type2; + unsigned int rxfifo_over_2048; + /* TX and RX number of channels */ + unsigned int number_rx_channel; + unsigned int number_tx_channel; + /* TX and RX number of queues */ + unsigned int number_rx_queues; + unsigned int number_tx_queues; + /* PPS output */ + unsigned int pps_out_num; + /* Alternate (enhanced) DESC mode */ + unsigned int enh_desc; + /* TX and RX FIFO sizes */ + unsigned int tx_fifo_size; + unsigned int rx_fifo_size; + /* Automotive Safety Package */ + unsigned int asp; + /* RX Parser */ + unsigned int frpsel; + unsigned int frpbs; + unsigned int frpes; + unsigned int addr64; + unsigned int host_dma_width; + unsigned int rssen; + unsigned int vlhash; + unsigned int sphen; + unsigned int vlins; + unsigned int dvlan; + unsigned int l3l4fnum; + unsigned int arpoffsel; + /* TSN Features */ + unsigned int estwid; + unsigned int estdep; + unsigned int estsel; + unsigned int fpesel; + unsigned int tbssel; + /* Numbers of Auxiliary Snapshot Inputs */ + unsigned int aux_snapshot_n; +}; + +/* RX Buffer size must be multiple of 4/8/16 bytes */ +#define BUF_SIZE_16KiB 16368 +#define BUF_SIZE_8KiB 8188 +#define BUF_SIZE_4KiB 4096 +#define BUF_SIZE_2KiB 2048 + +/* Power Down and WOL */ +#define PMT_NOT_SUPPORTED 0 +#define PMT_SUPPORTED 1 + +/* Common MAC defines */ +#define MAC_CTRL_REG 0x00000000 /* MAC Control */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ + +/* Default LPI timers */ +#define STMMAC_DEFAULT_LIT_LS 0x3E8 +#define STMMAC_DEFAULT_TWT_LS 0x1E +#define STMMAC_ET_MAX 0xFFFFF + +#define STMMAC_CHAIN_MODE 0x1 +#define STMMAC_RING_MODE 0x2 + +#define JUMBO_LEN 9000 + +/* Receive Side Scaling */ +#define STMMAC_RSS_HASH_KEY_SIZE 40 +#define STMMAC_RSS_MAX_TABLE_SIZE 256 + +/* VLAN */ +#define STMMAC_VLAN_NONE 0x0 +#define STMMAC_VLAN_REMOVE 0x1 +#define STMMAC_VLAN_INSERT 0x2 +#define STMMAC_VLAN_REPLACE 0x3 + +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; + +struct mac_device_info; + +extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; + +struct mac_link { + u32 speed_mask; + u32 speed10; + u32 speed100; + u32 speed1000; + u32 speed2500; + u32 duplex; + struct { + u32 speed2500; + u32 speed5000; + u32 speed10000; + } xgmii; + struct { + u32 speed25000; + u32 speed40000; + u32 speed50000; + u32 speed100000; + } xlgmii; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ + unsigned int addr_shift; /* MII address shift */ + unsigned int reg_shift; /* MII reg shift */ + unsigned int addr_mask; /* MII address mask */ + unsigned int reg_mask; /* MII reg mask */ + unsigned int clk_csr_shift; + unsigned int clk_csr_mask; +}; + +struct mac_device_info { + const struct stmmac_ops *mac; + const struct stmmac_desc_ops *desc; + const struct stmmac_dma_ops *dma; + const struct stmmac_mode_ops *mode; + const struct stmmac_hwtimestamp *ptp; + const struct stmmac_tc_ops *tc; + const struct stmmac_mmc_ops *mmc; + struct dw_xpcs *xpcs; + struct mii_regs mii; /* MII register Addresses */ + struct mac_link link; + void __iomem *pcsr; /* vpointer to 
device CSRs */ + unsigned int multicast_filter_bins; + unsigned int unicast_filter_entries; + unsigned int mcast_bits_log2; + unsigned int rx_csum; + unsigned int pcs; + unsigned int pmt; + unsigned int ps; + unsigned int xlgmac; + unsigned int num_vlan; + u32 vlan_filter[32]; + bool vlan_fail_q_en; + u8 vlan_fail_q; +}; + +struct stmmac_rx_routing { + u32 reg_mask; + u32 reg_shift; +}; + +int dwmac100_setup(struct stmmac_priv *priv); +int dwmac1000_setup(struct stmmac_priv *priv); +int dwmac4_setup(struct stmmac_priv *priv); +int dwxgmac2_setup(struct stmmac_priv *priv); +int dwxlgmac2_setup(struct stmmac_priv *priv); + +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); +void stmmac_set_mac(void __iomem *ioaddr, bool enable); + +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); +void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); + +void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); + +extern const struct stmmac_mode_ops ring_mode_ops; +extern const struct stmmac_mode_ops chain_mode_ops; +extern const struct stmmac_desc_ops dwmac4_desc_ops; + +#endif /* __COMMON_H__ */ diff --git a/devices/stmmac/descs-6.1-ethercat.h b/devices/stmmac/descs-6.1-ethercat.h new file mode 100644 index 00000000..49d6a866 --- /dev/null +++ b/devices/stmmac/descs-6.1-ethercat.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Header File to describe the DMA descriptors and related definitions. + This is for DWMAC100 and 1000 cores. 
+ + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DESCS_H__ +#define __DESCS_H__ + +#include + +/* Normal receive descriptor defines */ + +/* RDES0 */ +#define RDES0_PAYLOAD_CSUM_ERR BIT(0) +#define RDES0_CRC_ERROR BIT(1) +#define RDES0_DRIBBLING BIT(2) +#define RDES0_MII_ERROR BIT(3) +#define RDES0_RECEIVE_WATCHDOG BIT(4) +#define RDES0_FRAME_TYPE BIT(5) +#define RDES0_COLLISION BIT(6) +#define RDES0_IPC_CSUM_ERROR BIT(7) +#define RDES0_LAST_DESCRIPTOR BIT(8) +#define RDES0_FIRST_DESCRIPTOR BIT(9) +#define RDES0_VLAN_TAG BIT(10) +#define RDES0_OVERFLOW_ERROR BIT(11) +#define RDES0_LENGTH_ERROR BIT(12) +#define RDES0_SA_FILTER_FAIL BIT(13) +#define RDES0_DESCRIPTOR_ERROR BIT(14) +#define RDES0_ERROR_SUMMARY BIT(15) +#define RDES0_FRAME_LEN_MASK GENMASK(29, 16) +#define RDES0_FRAME_LEN_SHIFT 16 +#define RDES0_DA_FILTER_FAIL BIT(30) +#define RDES0_OWN BIT(31) + /* RDES1 */ +#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define RDES1_BUFFER2_SIZE_SHIFT 11 +#define RDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define RDES1_END_RING BIT(25) +#define RDES1_DISABLE_IC BIT(31) + +/* Enhanced receive descriptor defines */ + +/* RDES0 (similar to normal RDES) */ +#define ERDES0_RX_MAC_ADDR BIT(0) + +/* RDES1: completely differ from normal desc definitions */ +#define ERDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14) +#define ERDES1_END_RING BIT(15) +#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ERDES1_BUFFER2_SIZE_SHIFT 16 +#define ERDES1_DISABLE_IC BIT(31) + +/* Normal transmit descriptor defines */ +/* TDES0 */ +#define TDES0_DEFERRED BIT(0) +#define TDES0_UNDERFLOW_ERROR BIT(1) +#define TDES0_EXCESSIVE_DEFERRAL BIT(2) +#define TDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define TDES0_VLAN_FRAME BIT(7) +#define TDES0_EXCESSIVE_COLLISIONS BIT(8) +#define TDES0_LATE_COLLISION BIT(9) +#define TDES0_NO_CARRIER BIT(10) +#define TDES0_LOSS_CARRIER BIT(11) +#define TDES0_PAYLOAD_ERROR BIT(12) +#define TDES0_FRAME_FLUSHED BIT(13) +#define TDES0_JABBER_TIMEOUT BIT(14) +#define TDES0_ERROR_SUMMARY BIT(15) +#define TDES0_IP_HEADER_ERROR BIT(16) +#define TDES0_TIME_STAMP_STATUS BIT(17) +#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */ +/* TDES1 */ +#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define TDES1_BUFFER2_SIZE_SHIFT 11 +#define TDES1_TIME_STAMP_ENABLE BIT(22) +#define TDES1_DISABLE_PADDING BIT(23) +#define TDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define TDES1_END_RING BIT(25) +#define TDES1_CRC_DISABLE BIT(26) +#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27) +#define TDES1_CHECKSUM_INSERTION_SHIFT 27 +#define TDES1_FIRST_SEGMENT BIT(29) +#define TDES1_LAST_SEGMENT BIT(30) +#define TDES1_INTERRUPT BIT(31) + +/* Enhanced transmit descriptor defines */ +/* TDES0 */ +#define ETDES0_DEFERRED BIT(0) +#define ETDES0_UNDERFLOW_ERROR BIT(1) +#define ETDES0_EXCESSIVE_DEFERRAL BIT(2) +#define ETDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define ETDES0_VLAN_FRAME BIT(7) +#define ETDES0_EXCESSIVE_COLLISIONS BIT(8) +#define ETDES0_LATE_COLLISION BIT(9) +#define ETDES0_NO_CARRIER BIT(10) +#define ETDES0_LOSS_CARRIER BIT(11) +#define ETDES0_PAYLOAD_ERROR BIT(12) +#define ETDES0_FRAME_FLUSHED BIT(13) +#define ETDES0_JABBER_TIMEOUT BIT(14) +#define ETDES0_ERROR_SUMMARY BIT(15) +#define ETDES0_IP_HEADER_ERROR BIT(16) +#define ETDES0_TIME_STAMP_STATUS BIT(17) +#define 
ETDES0_SECOND_ADDRESS_CHAINED BIT(20) +#define ETDES0_END_RING BIT(21) +#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22) +#define ETDES0_CHECKSUM_INSERTION_SHIFT 22 +#define ETDES0_TIME_STAMP_ENABLE BIT(25) +#define ETDES0_DISABLE_PADDING BIT(26) +#define ETDES0_CRC_DISABLE BIT(27) +#define ETDES0_FIRST_SEGMENT BIT(28) +#define ETDES0_LAST_SEGMENT BIT(29) +#define ETDES0_INTERRUPT BIT(30) +#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */ +/* TDES1 */ +#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ETDES1_BUFFER2_SIZE_SHIFT 16 + +/* Extended Receive descriptor definitions */ +#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(6, 2) +#define ERDES4_IP_HDR_ERR BIT(3) +#define ERDES4_IP_PAYLOAD_ERR BIT(4) +#define ERDES4_IP_CSUM_BYPASSED BIT(5) +#define ERDES4_IPV4_PKT_RCVD BIT(6) +#define ERDES4_IPV6_PKT_RCVD BIT(7) +#define ERDES4_MSG_TYPE_MASK GENMASK(11, 8) +#define ERDES4_PTP_FRAME_TYPE BIT(12) +#define ERDES4_PTP_VER BIT(13) +#define ERDES4_TIMESTAMP_DROPPED BIT(14) +#define ERDES4_AV_PKT_RCVD BIT(16) +#define ERDES4_AV_TAGGED_PKT_RCVD BIT(17) +#define ERDES4_VLAN_TAG_PRI_VAL_MASK GENMASK(20, 18) +#define ERDES4_L3_FILTER_MATCH BIT(24) +#define ERDES4_L4_FILTER_MATCH BIT(25) +#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) + +/* Extended RDES4 message type definitions */ +#define RDES_EXT_NO_PTP 0x0 +#define RDES_EXT_SYNC 0x1 +#define RDES_EXT_FOLLOW_UP 0x2 +#define RDES_EXT_DELAY_REQ 0x3 +#define RDES_EXT_DELAY_RESP 0x4 +#define RDES_EXT_PDELAY_REQ 0x5 +#define RDES_EXT_PDELAY_RESP 0x6 +#define RDES_EXT_PDELAY_FOLLOW_UP 0x7 +#define RDES_PTP_ANNOUNCE 0x8 +#define RDES_PTP_MANAGEMENT 0x9 +#define RDES_PTP_SIGNALING 0xa +#define RDES_PTP_PKT_RESERVED_TYPE 0xf + +/* Basic descriptor structure for normal and alternate descriptors */ +struct dma_desc { + __le32 des0; + __le32 des1; + __le32 des2; + __le32 des3; +}; + +/* Extended descriptor structure (e.g. >= databook 3.50a) */ +struct dma_extended_desc { + struct dma_desc basic; /* Basic descriptors */ + __le32 des4; /* Extended Status */ + __le32 des5; /* Reserved */ + __le32 des6; /* Tx/Rx Timestamp Low */ + __le32 des7; /* Tx/Rx Timestamp High */ +}; + +/* Enhanced descriptor for TBS */ +struct dma_edesc { + __le32 des4; + __le32 des5; + __le32 des6; + __le32 des7; + struct dma_desc basic; +}; + +/* Transmit checksum insertion control */ +#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */ + +#endif /* __DESCS_H__ */ diff --git a/devices/stmmac/descs-6.1-orig.h b/devices/stmmac/descs-6.1-orig.h new file mode 100644 index 00000000..49d6a866 --- /dev/null +++ b/devices/stmmac/descs-6.1-orig.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Header File to describe the DMA descriptors and related definitions. + This is for DWMAC100 and 1000 cores. 
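The descriptor words defined in this header are the driver/DMA hand-off point: software owns a descriptor while the OWN bit is clear, the engine owns it while the bit is set, and on receive completion the frame length and error status are packed into RDES0. A stand-alone sketch of that decode (user-space build assumed, with the relevant constants reproduced locally; decode_rdes0() is illustrative only, not the driver's actual rx_status callback):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define RDES0_ERROR_SUMMARY	BIT(15)
#define RDES0_FRAME_LEN_MASK	0x3fff0000u	/* GENMASK(29, 16) */
#define RDES0_FRAME_LEN_SHIFT	16
#define RDES0_OWN		BIT(31)

/* Decode one RDES0 status word the way a receive-completion path would. */
static void decode_rdes0(uint32_t rdes0)
{
	if (rdes0 & RDES0_OWN)
		printf("descriptor still owned by the DMA engine\n");
	else if (rdes0 & RDES0_ERROR_SUMMARY)
		printf("frame discarded (error summary set)\n");
	else
		printf("good frame, %u bytes\n",
		       (rdes0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT);
}

int main(void)
{
	decode_rdes0(1514u << RDES0_FRAME_LEN_SHIFT);	/* completed frame */
	decode_rdes0(RDES0_OWN);			/* not yet received */
	return 0;
}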
+ + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DESCS_H__ +#define __DESCS_H__ + +#include + +/* Normal receive descriptor defines */ + +/* RDES0 */ +#define RDES0_PAYLOAD_CSUM_ERR BIT(0) +#define RDES0_CRC_ERROR BIT(1) +#define RDES0_DRIBBLING BIT(2) +#define RDES0_MII_ERROR BIT(3) +#define RDES0_RECEIVE_WATCHDOG BIT(4) +#define RDES0_FRAME_TYPE BIT(5) +#define RDES0_COLLISION BIT(6) +#define RDES0_IPC_CSUM_ERROR BIT(7) +#define RDES0_LAST_DESCRIPTOR BIT(8) +#define RDES0_FIRST_DESCRIPTOR BIT(9) +#define RDES0_VLAN_TAG BIT(10) +#define RDES0_OVERFLOW_ERROR BIT(11) +#define RDES0_LENGTH_ERROR BIT(12) +#define RDES0_SA_FILTER_FAIL BIT(13) +#define RDES0_DESCRIPTOR_ERROR BIT(14) +#define RDES0_ERROR_SUMMARY BIT(15) +#define RDES0_FRAME_LEN_MASK GENMASK(29, 16) +#define RDES0_FRAME_LEN_SHIFT 16 +#define RDES0_DA_FILTER_FAIL BIT(30) +#define RDES0_OWN BIT(31) + /* RDES1 */ +#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define RDES1_BUFFER2_SIZE_SHIFT 11 +#define RDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define RDES1_END_RING BIT(25) +#define RDES1_DISABLE_IC BIT(31) + +/* Enhanced receive descriptor defines */ + +/* RDES0 (similar to normal RDES) */ +#define ERDES0_RX_MAC_ADDR BIT(0) + +/* RDES1: completely differ from normal desc definitions */ +#define ERDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14) +#define ERDES1_END_RING BIT(15) +#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ERDES1_BUFFER2_SIZE_SHIFT 16 +#define ERDES1_DISABLE_IC BIT(31) + +/* Normal transmit descriptor defines */ +/* TDES0 */ +#define TDES0_DEFERRED BIT(0) +#define TDES0_UNDERFLOW_ERROR BIT(1) +#define TDES0_EXCESSIVE_DEFERRAL BIT(2) +#define TDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define TDES0_VLAN_FRAME BIT(7) +#define TDES0_EXCESSIVE_COLLISIONS BIT(8) +#define TDES0_LATE_COLLISION BIT(9) +#define TDES0_NO_CARRIER BIT(10) +#define TDES0_LOSS_CARRIER BIT(11) +#define TDES0_PAYLOAD_ERROR BIT(12) +#define TDES0_FRAME_FLUSHED BIT(13) +#define TDES0_JABBER_TIMEOUT BIT(14) +#define TDES0_ERROR_SUMMARY BIT(15) +#define TDES0_IP_HEADER_ERROR BIT(16) +#define TDES0_TIME_STAMP_STATUS BIT(17) +#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */ +/* TDES1 */ +#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define TDES1_BUFFER2_SIZE_SHIFT 11 +#define TDES1_TIME_STAMP_ENABLE BIT(22) +#define TDES1_DISABLE_PADDING BIT(23) +#define TDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define TDES1_END_RING BIT(25) +#define TDES1_CRC_DISABLE BIT(26) +#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27) +#define TDES1_CHECKSUM_INSERTION_SHIFT 27 +#define TDES1_FIRST_SEGMENT BIT(29) +#define TDES1_LAST_SEGMENT BIT(30) +#define TDES1_INTERRUPT BIT(31) + +/* Enhanced transmit descriptor defines */ +/* TDES0 */ +#define ETDES0_DEFERRED BIT(0) +#define ETDES0_UNDERFLOW_ERROR BIT(1) +#define ETDES0_EXCESSIVE_DEFERRAL BIT(2) +#define ETDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define ETDES0_VLAN_FRAME BIT(7) +#define ETDES0_EXCESSIVE_COLLISIONS BIT(8) +#define ETDES0_LATE_COLLISION BIT(9) +#define ETDES0_NO_CARRIER BIT(10) +#define ETDES0_LOSS_CARRIER BIT(11) +#define ETDES0_PAYLOAD_ERROR BIT(12) +#define ETDES0_FRAME_FLUSHED BIT(13) +#define ETDES0_JABBER_TIMEOUT BIT(14) +#define ETDES0_ERROR_SUMMARY BIT(15) +#define ETDES0_IP_HEADER_ERROR BIT(16) +#define ETDES0_TIME_STAMP_STATUS BIT(17) +#define 
ETDES0_SECOND_ADDRESS_CHAINED BIT(20) +#define ETDES0_END_RING BIT(21) +#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22) +#define ETDES0_CHECKSUM_INSERTION_SHIFT 22 +#define ETDES0_TIME_STAMP_ENABLE BIT(25) +#define ETDES0_DISABLE_PADDING BIT(26) +#define ETDES0_CRC_DISABLE BIT(27) +#define ETDES0_FIRST_SEGMENT BIT(28) +#define ETDES0_LAST_SEGMENT BIT(29) +#define ETDES0_INTERRUPT BIT(30) +#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */ +/* TDES1 */ +#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ETDES1_BUFFER2_SIZE_SHIFT 16 + +/* Extended Receive descriptor definitions */ +#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(6, 2) +#define ERDES4_IP_HDR_ERR BIT(3) +#define ERDES4_IP_PAYLOAD_ERR BIT(4) +#define ERDES4_IP_CSUM_BYPASSED BIT(5) +#define ERDES4_IPV4_PKT_RCVD BIT(6) +#define ERDES4_IPV6_PKT_RCVD BIT(7) +#define ERDES4_MSG_TYPE_MASK GENMASK(11, 8) +#define ERDES4_PTP_FRAME_TYPE BIT(12) +#define ERDES4_PTP_VER BIT(13) +#define ERDES4_TIMESTAMP_DROPPED BIT(14) +#define ERDES4_AV_PKT_RCVD BIT(16) +#define ERDES4_AV_TAGGED_PKT_RCVD BIT(17) +#define ERDES4_VLAN_TAG_PRI_VAL_MASK GENMASK(20, 18) +#define ERDES4_L3_FILTER_MATCH BIT(24) +#define ERDES4_L4_FILTER_MATCH BIT(25) +#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) + +/* Extended RDES4 message type definitions */ +#define RDES_EXT_NO_PTP 0x0 +#define RDES_EXT_SYNC 0x1 +#define RDES_EXT_FOLLOW_UP 0x2 +#define RDES_EXT_DELAY_REQ 0x3 +#define RDES_EXT_DELAY_RESP 0x4 +#define RDES_EXT_PDELAY_REQ 0x5 +#define RDES_EXT_PDELAY_RESP 0x6 +#define RDES_EXT_PDELAY_FOLLOW_UP 0x7 +#define RDES_PTP_ANNOUNCE 0x8 +#define RDES_PTP_MANAGEMENT 0x9 +#define RDES_PTP_SIGNALING 0xa +#define RDES_PTP_PKT_RESERVED_TYPE 0xf + +/* Basic descriptor structure for normal and alternate descriptors */ +struct dma_desc { + __le32 des0; + __le32 des1; + __le32 des2; + __le32 des3; +}; + +/* Extended descriptor structure (e.g. >= databook 3.50a) */ +struct dma_extended_desc { + struct dma_desc basic; /* Basic descriptors */ + __le32 des4; /* Extended Status */ + __le32 des5; /* Reserved */ + __le32 des6; /* Tx/Rx Timestamp Low */ + __le32 des7; /* Tx/Rx Timestamp High */ +}; + +/* Enhanced descriptor for TBS */ +struct dma_edesc { + __le32 des4; + __le32 des5; + __le32 des6; + __le32 des7; + struct dma_desc basic; +}; + +/* Transmit checksum insertion control */ +#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */ + +#endif /* __DESCS_H__ */ diff --git a/devices/stmmac/descs_com-6.1-ethercat.h b/devices/stmmac/descs_com-6.1-ethercat.h new file mode 100644 index 00000000..40f7f2da --- /dev/null +++ b/devices/stmmac/descs_com-6.1-ethercat.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Header File to describe Normal/enhanced descriptor functions used for RING + and CHAINED modes. + + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + descriptors in case of the DMA is configured to work in chained or + in ring mode. 
+ + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DESC_COM_H__ +#define __DESC_COM_H__ + +/* Specific functions used for Ring mode */ + +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, + int bfsize) +{ + if (bfsize == BUF_SIZE_16KiB) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB + << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK); + + if (end) + p->des1 |= cpu_to_le32(ERDES1_END_RING); +} + +static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end) +{ + if (end) + p->des0 |= cpu_to_le32(ETDES0_END_RING); + else + p->des0 &= cpu_to_le32(~ETDES0_END_RING); +} + +static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) +{ + if (unlikely(len > BUF_SIZE_4KiB)) { + p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB) + << ETDES1_BUFFER2_SIZE_SHIFT) + & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB + & ETDES1_BUFFER1_SIZE_MASK)); + } else + p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK)); +} + +/* Normal descriptors */ +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) +{ + if (bfsize >= BUF_SIZE_2KiB) { + int bfsize2; + + bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK); + } + + if (end) + p->des1 |= cpu_to_le32(RDES1_END_RING); +} + +static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end) +{ + if (end) + p->des1 |= cpu_to_le32(TDES1_END_RING); + else + p->des1 &= cpu_to_le32(~TDES1_END_RING); +} + +static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len) +{ + if (unlikely(len > BUF_SIZE_2KiB)) { + unsigned int buffer1 = (BUF_SIZE_2KiB - 1) + & TDES1_BUFFER1_SIZE_MASK; + p->des1 |= cpu_to_le32((((len - buffer1) + << TDES1_BUFFER2_SIZE_SHIFT) + & TDES1_BUFFER2_SIZE_MASK) | buffer1); + } else + p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK)); +} + +/* Specific functions used for Chain mode */ + +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED); +} + +static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED); +} + +static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len) +{ + p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK); +} + +/* Normal descriptors */ +static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end) +{ + p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED); +} + +static inline void ndesc_tx_set_on_chain(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED); +} + +static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len) +{ + p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK); +} +#endif /* __DESC_COM_H__ */ diff --git a/devices/stmmac/descs_com-6.1-orig.h b/devices/stmmac/descs_com-6.1-orig.h new file mode 100644 index 00000000..40f7f2da --- /dev/null +++ b/devices/stmmac/descs_com-6.1-orig.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Header File to describe Normal/enhanced descriptor functions used for RING + and CHAINED modes. 
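The ring-mode helpers above split one payload across the two buffer-size fields of TDES1 once it no longer fits in a single 2 KiB buffer (4 KiB for enhanced descriptors). A stand-alone sketch of the normal-descriptor split performed by norm_set_tx_desc_len_on_ring() (user-space build assumed, constants reproduced locally; the 3000-byte frame length is just an example value):

#include <stdio.h>

#define BUF_SIZE_2KiB			2048
#define TDES1_BUFFER1_SIZE_MASK		0x000007ffu	/* GENMASK(10, 0) */
#define TDES1_BUFFER2_SIZE_MASK		0x003ff800u	/* GENMASK(21, 11) */
#define TDES1_BUFFER2_SIZE_SHIFT	11

int main(void)
{
	unsigned int len = 3000;	/* example frame length in bytes */
	unsigned int des1 = 0;

	if (len > BUF_SIZE_2KiB) {
		/* Fill buffer 1 to its maximum, put the remainder in buffer 2,
		 * mirroring the helper in the header above. */
		unsigned int buffer1 = (BUF_SIZE_2KiB - 1) &
				       TDES1_BUFFER1_SIZE_MASK;

		des1 |= (((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT) &
			 TDES1_BUFFER2_SIZE_MASK) | buffer1;
	} else {
		des1 |= len & TDES1_BUFFER1_SIZE_MASK;
	}

	printf("len=%u -> buffer1=%u buffer2=%u\n", len,
	       des1 & TDES1_BUFFER1_SIZE_MASK,
	       (des1 & TDES1_BUFFER2_SIZE_MASK) >> TDES1_BUFFER2_SIZE_SHIFT);
	return 0;
}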
+ + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + descriptors in case of the DMA is configured to work in chained or + in ring mode. + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DESC_COM_H__ +#define __DESC_COM_H__ + +/* Specific functions used for Ring mode */ + +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, + int bfsize) +{ + if (bfsize == BUF_SIZE_16KiB) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB + << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK); + + if (end) + p->des1 |= cpu_to_le32(ERDES1_END_RING); +} + +static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end) +{ + if (end) + p->des0 |= cpu_to_le32(ETDES0_END_RING); + else + p->des0 &= cpu_to_le32(~ETDES0_END_RING); +} + +static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) +{ + if (unlikely(len > BUF_SIZE_4KiB)) { + p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB) + << ETDES1_BUFFER2_SIZE_SHIFT) + & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB + & ETDES1_BUFFER1_SIZE_MASK)); + } else + p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK)); +} + +/* Normal descriptors */ +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) +{ + if (bfsize >= BUF_SIZE_2KiB) { + int bfsize2; + + bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK); + } + + if (end) + p->des1 |= cpu_to_le32(RDES1_END_RING); +} + +static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end) +{ + if (end) + p->des1 |= cpu_to_le32(TDES1_END_RING); + else + p->des1 &= cpu_to_le32(~TDES1_END_RING); +} + +static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len) +{ + if (unlikely(len > BUF_SIZE_2KiB)) { + unsigned int buffer1 = (BUF_SIZE_2KiB - 1) + & TDES1_BUFFER1_SIZE_MASK; + p->des1 |= cpu_to_le32((((len - buffer1) + << TDES1_BUFFER2_SIZE_SHIFT) + & TDES1_BUFFER2_SIZE_MASK) | buffer1); + } else + p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK)); +} + +/* Specific functions used for Chain mode */ + +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED); +} + +static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED); +} + +static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len) +{ + p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK); +} + +/* Normal descriptors */ +static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end) +{ + p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED); +} + +static inline void ndesc_tx_set_on_chain(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED); +} + +static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len) +{ + p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK); +} +#endif /* __DESC_COM_H__ */ diff --git a/devices/stmmac/dwmac-intel-6.1-ethercat.c b/devices/stmmac/dwmac-intel-6.1-ethercat.c new file mode 100644 index 00000000..48fdc1b4 --- /dev/null +++ b/devices/stmmac/dwmac-intel-6.1-ethercat.c @@ -0,0 +1,1262 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation + */ + +#include +#include +#include +#include "dwmac-intel-6.1-ethercat.h" +#include 
"dwmac4-6.1-ethercat.h" +#include "stmmac-6.1-ethercat.h" +#include "stmmac_ptp-6.1-ethercat.h" + +struct intel_priv_data { + int mdio_adhoc_addr; /* mdio address for serdes & etc */ + unsigned long crossts_adj; + bool is_pse; +}; + +/* This struct is used to associate PCI Function of MAC controller on a board, + * discovered via DMI, with the address of PHY connected to the MAC. The + * negative value of the address means that MAC controller is not connected + * with PHY. + */ +struct stmmac_pci_func_data { + unsigned int func; + int phy_addr; +}; + +struct stmmac_pci_dmi_data { + const struct stmmac_pci_func_data *func; + size_t nfuncs; +}; + +struct stmmac_pci_info { + int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); +}; + +static int stmmac_pci_find_phy_addr(struct pci_dev *pdev, + const struct dmi_system_id *dmi_list) +{ + const struct stmmac_pci_func_data *func_data; + const struct stmmac_pci_dmi_data *dmi_data; + const struct dmi_system_id *dmi_id; + int func = PCI_FUNC(pdev->devfn); + size_t n; + + dmi_id = dmi_first_match(dmi_list); + if (!dmi_id) + return -ENODEV; + + dmi_data = dmi_id->driver_data; + func_data = dmi_data->func; + + for (n = 0; n < dmi_data->nfuncs; n++, func_data++) + if (func_data->func == func) + return func_data->phy_addr; + + return -ENODEV; +} + +static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 mask, u32 val) +{ + unsigned int retries = 10; + int val_rd; + + do { + val_rd = mdiobus_read(priv->mii, phyaddr, phyreg); + if ((val_rd & mask) == (val & mask)) + return 0; + udelay(POLL_DELAY_US); + } while (--retries); + + return -ETIMEDOUT; +} + +static int intel_serdes_powerup(struct net_device *ndev, void *priv_data) +{ + struct intel_priv_data *intel_priv = priv_data; + struct stmmac_priv *priv = netdev_priv(ndev); + int serdes_phy_addr = 0; + u32 data = 0; + + if (!intel_priv->mdio_adhoc_addr) + return 0; + + serdes_phy_addr = intel_priv->mdio_adhoc_addr; + + /* Set the serdes rate and the PCLK rate */ + data = mdiobus_read(priv->mii, serdes_phy_addr, + SERDES_GCR0); + + data &= ~SERDES_RATE_MASK; + data &= ~SERDES_PCLK_MASK; + + if (priv->plat->max_speed == 2500) + data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT | + SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT; + else + data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT | + SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT; + + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* assert clk_req */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data |= SERDES_PLL_CLK; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for clk_ack assertion */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PLL_CLK, + SERDES_PLL_CLK); + + if (data) { + dev_err(priv->device, "Serdes PLL clk request timeout\n"); + return data; + } + + /* assert lane reset */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data |= SERDES_RST; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for assert lane reset reflection */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_RST, + SERDES_RST); + + if (data) { + dev_err(priv->device, "Serdes assert lane reset timeout\n"); + return data; + } + + /* move power state to P0 */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + + data &= ~SERDES_PWR_ST_MASK; + data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT; + + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); 
+ + /* Check for P0 state */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PWR_ST_MASK, + SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT); + + if (data) { + dev_err(priv->device, "Serdes power state P0 timeout.\n"); + return data; + } + + /* PSE only - ungate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + 0, SERDES_PHY_RX_CLK); + + return 0; +} + +static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data) +{ + struct intel_priv_data *intel_priv = intel_data; + struct stmmac_priv *priv = netdev_priv(ndev); + int serdes_phy_addr = 0; + u32 data = 0; + + if (!intel_priv->mdio_adhoc_addr) + return; + + serdes_phy_addr = intel_priv->mdio_adhoc_addr; + + /* PSE only - gate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + SERDES_PHY_RX_CLK, 0); + + /* move power state to P3 */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + + data &= ~SERDES_PWR_ST_MASK; + data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT; + + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* Check for P3 state */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PWR_ST_MASK, + SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT); + + if (data) { + dev_err(priv->device, "Serdes power state P3 timeout\n"); + return; + } + + /* de-assert clk_req */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data &= ~SERDES_PLL_CLK; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for clk_ack de-assert */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PLL_CLK, + (u32)~SERDES_PLL_CLK); + + if (data) { + dev_err(priv->device, "Serdes PLL clk de-assert timeout\n"); + return; + } + + /* de-assert lane reset */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data &= ~SERDES_RST; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for de-assert lane reset reflection */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_RST, + (u32)~SERDES_RST); + + if (data) { + dev_err(priv->device, "Serdes de-assert lane reset timeout\n"); + return; + } +} + +static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data) +{ + struct intel_priv_data *intel_priv = intel_data; + struct stmmac_priv *priv = netdev_priv(ndev); + int serdes_phy_addr = 0; + u32 data = 0; + + serdes_phy_addr = intel_priv->mdio_adhoc_addr; + + /* Determine the link speed mode: 2.5Gbps/1Gbps */ + data = mdiobus_read(priv->mii, serdes_phy_addr, + SERDES_GCR); + + if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) == + SERDES_LINK_MODE_2G5) { + dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n"); + priv->plat->max_speed = 2500; + priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; + priv->plat->mdio_bus_data->xpcs_an_inband = false; + } else { + priv->plat->max_speed = 1000; + } +} + +/* Program PTP Clock Frequency for different variant of + * Intel mGBE that has slightly different GPO mapping + */ +static void intel_mgbe_ptp_clk_freq_config(void *npriv) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)npriv; + struct intel_priv_data *intel_priv; + u32 gpio_value; + + intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv; + + gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS); + + if (intel_priv->is_pse) { + /* For PSE GbE, use 200MHz */ + gpio_value &= ~PSE_PTP_CLK_FREQ_MASK; + gpio_value |= 
PSE_PTP_CLK_FREQ_200MHZ; + } else { + /* For PCH GbE, use 200MHz */ + gpio_value &= ~PCH_PTP_CLK_FREQ_MASK; + gpio_value |= PCH_PTP_CLK_FREQ_200MHZ; + } + + writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS); +} + +static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr, + u64 *art_time) +{ + u64 ns; + + ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0); + + *art_time = ns; +} + +static int stmmac_cross_ts_isr(struct stmmac_priv *priv) +{ + return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE); +} + +static int intel_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct intel_priv_data *intel_priv; + + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 art_time = 0; + u64 ptp_time = 0; + u32 num_snapshot; + u32 gpio_value; + u32 acr_value; + int i; + + if (!boot_cpu_has(X86_FEATURE_ART)) + return -EOPNOTSUPP; + + intel_priv = priv->plat->bsp_priv; + + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. + */ + if (priv->plat->ext_snapshot_en) + return -EBUSY; + + priv->plat->int_snapshot_en = 1; + + mutex_lock(&priv->aux_ts_lock); + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + mutex_unlock(&priv->aux_ts_lock); + priv->plat->int_snapshot_en = 0; + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); + + /* Trigger Internal snapshot signal + * Create a rising edge by just toggle the GPO1 to low + * and back to high. 
+ */ + gpio_value = readl(ioaddr + GMAC_GPIO_STATUS); + gpio_value &= ~GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + gpio_value |= GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + + /* Time sync done Indication - Interrupt method */ + if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait, + stmmac_cross_ts_isr(priv), + HZ / 100)) { + priv->plat->int_snapshot_en = 0; + return -ETIMEDOUT; + } + + num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & + GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time); + *system = convert_art_to_tsc(art_time); + } + + system->cycles *= intel_priv->crossts_adj; + priv->plat->int_snapshot_en = 0; + + return 0; +} + +static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv, + int base) +{ + if (boot_cpu_has(X86_FEATURE_ART)) { + unsigned int art_freq; + + /* On systems that support ART, ART frequency can be obtained + * from ECX register of CPUID leaf (0x15). + */ + art_freq = cpuid_ecx(ART_CPUID_LEAF); + do_div(art_freq, base); + intel_priv->crossts_adj = art_freq; + } +} + +static void common_default_data(struct plat_stmmacenet_data *plat) +{ + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; + + plat->mdio_bus_data->needs_reset = true; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[0].use_prio = false; + plat->rx_queues_cfg[0].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[0].pkt_route = 0x0; +} + +static int intel_mgbe_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct fwnode_handle *fwnode; + char clk_name[20]; + int ret; + int i; + + plat->pdev = pdev; + plat->phy_addr = -1; + plat->clk_csr = 5; + plat->has_gmac = 0; + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 0; + plat->tso_en = 1; + plat->sph_disable = 1; + + /* Multiplying factor to the clk_eee_i clock time + * period to make it closer to 100 ns. 
This value + * should be programmed such that the clk_eee_time_period * + * (MULT_FACT_100NS + 1) should be within 80 ns to 120 ns + * clk_eee frequency is 19.2Mhz + * clk_eee_time_period is 52ns + * 52ns * (1 + 1) = 104ns + * MULT_FACT_100NS = 1 + */ + plat->mult_fact_100ns = 1; + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + + for (i = 0; i < plat->rx_queues_to_use; i++) { + plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->rx_queues_cfg[i].chan = i; + + /* Disable Priority config by default */ + plat->rx_queues_cfg[i].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[i].pkt_route = 0x0; + } + + for (i = 0; i < plat->tx_queues_to_use; i++) { + plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[i].use_prio = false; + /* Default TX Q0 to use TSO and rest TXQ for TBS */ + if (i > 0) + plat->tx_queues_cfg[i].tbs_en = 1; + } + + /* FIFO size is 4096 bytes for 1 tx/rx queue */ + plat->tx_fifo_size = plat->tx_queues_to_use * 4096; + plat->rx_fifo_size = plat->rx_queues_to_use * 4096; + + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; + plat->tx_queues_cfg[0].weight = 0x09; + plat->tx_queues_cfg[1].weight = 0x0A; + plat->tx_queues_cfg[2].weight = 0x0B; + plat->tx_queues_cfg[3].weight = 0x0C; + plat->tx_queues_cfg[4].weight = 0x0D; + plat->tx_queues_cfg[5].weight = 0x0E; + plat->tx_queues_cfg[6].weight = 0x0F; + plat->tx_queues_cfg[7].weight = 0x10; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + plat->dma_cfg->fixed_burst = 0; + plat->dma_cfg->mixed_burst = 0; + plat->dma_cfg->aal = 0; + plat->dma_cfg->dche = true; + + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), + GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->axi->axi_lpi_en = 0; + plat->axi->axi_xit_frm = 0; + plat->axi->axi_wr_osr_lmt = 1; + plat->axi->axi_rd_osr_lmt = 1; + plat->axi->axi_blen[0] = 4; + plat->axi->axi_blen[1] = 8; + plat->axi->axi_blen[2] = 16; + + plat->ptp_max_adj = plat->clk_ptp_rate; + plat->eee_usecs_rate = plat->clk_ptp_rate; + + /* Set system clock */ + sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev)); + + plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, + clk_name, NULL, 0, + plat->clk_ptp_rate); + + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Fail to register stmmac-clk\n"); + plat->stmmac_clk = NULL; + } + + ret = clk_prepare_enable(plat->stmmac_clk); + if (ret) { + clk_unregister_fixed_rate(plat->stmmac_clk); + return ret; + } + + plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + plat->vlan_fail_q_en = true; + + /* Use the last Rx queue */ + plat->vlan_fail_q = plat->rx_queues_to_use - 1; + + /* For fixed-link setup, we allow phy-mode setting */ + fwnode = dev_fwnode(&pdev->dev); + if (fwnode) { + int phy_mode; + + /* "phy-mode" setting is optional. If it is set, + * we allow either sgmii or 1000base-x for now. 
+ */ + phy_mode = fwnode_get_phy_mode(fwnode); + if (phy_mode >= 0) { + if (phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_1000BASEX) + plat->phy_interface = phy_mode; + else + dev_warn(&pdev->dev, "Invalid phy-mode\n"); + } + } + + /* Intel mgbe SGMII interface uses pcs-xcps */ + if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII || + plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { + plat->mdio_bus_data->has_xpcs = true; + plat->mdio_bus_data->xpcs_an_inband = true; + } + + /* For fixed-link setup, we clear xpcs_an_inband */ + if (fwnode) { + struct fwnode_handle *fixed_node; + + fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); + if (fixed_node) + plat->mdio_bus_data->xpcs_an_inband = false; + + fwnode_handle_put(fixed_node); + } + + /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ + plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR; + plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; + + plat->int_snapshot_num = AUX_SNAPSHOT1; + plat->ext_snapshot_num = AUX_SNAPSHOT0; + + plat->crosststamp = intel_crosststamp; + plat->int_snapshot_en = 0; + + /* Setup MSI vector offset specific to Intel mGbE controller */ + plat->msi_mac_vec = 29; + plat->msi_lpi_vec = 28; + plat->msi_sfty_ce_vec = 27; + plat->msi_sfty_ue_vec = 26; + plat->msi_rx_base_vec = 0; + plat->msi_tx_base_vec = 1; + + return 0; +} + +static int ehl_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->rx_queues_to_use = 8; + plat->tx_queues_to_use = 8; + plat->use_phy_wol = 1; + + plat->safety_feat_cfg->tsoee = 1; + plat->safety_feat_cfg->mrxpee = 1; + plat->safety_feat_cfg->mestee = 1; + plat->safety_feat_cfg->mrxee = 1; + plat->safety_feat_cfg->mtxee = 1; + plat->safety_feat_cfg->epsi = 0; + plat->safety_feat_cfg->edpp = 0; + plat->safety_feat_cfg->prtyen = 0; + plat->safety_feat_cfg->tmouten = 0; + + return intel_mgbe_common_data(pdev, plat); +} + +static int ehl_sgmii_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->speed_mode_2500 = intel_speed_mode_2500; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + + plat->clk_ptp_rate = 204800000; + + return ehl_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_sgmii1g_info = { + .setup = ehl_sgmii_data, +}; + +static int ehl_rgmii_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_RGMII; + + plat->clk_ptp_rate = 204800000; + + return ehl_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_rgmii1g_info = { + .setup = ehl_rgmii_data, +}; + +static int ehl_pse0_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; + plat->bus_id = 2; + plat->host_dma_width = 32; + + plat->clk_ptp_rate = 200000000; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + + return ehl_common_data(pdev, plat); +} + +static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + return ehl_pse0_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse0_rgmii1g_info = { + .setup = ehl_pse0_rgmii1g_data, +}; + +static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = 
PHY_INTERFACE_MODE_SGMII; + plat->speed_mode_2500 = intel_speed_mode_2500; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return ehl_pse0_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse0_sgmii1g_info = { + .setup = ehl_pse0_sgmii1g_data, +}; + +static int ehl_pse1_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; + plat->bus_id = 3; + plat->host_dma_width = 32; + + plat->clk_ptp_rate = 200000000; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + + return ehl_common_data(pdev, plat); +} + +static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + return ehl_pse1_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse1_rgmii1g_info = { + .setup = ehl_pse1_rgmii1g_data, +}; + +static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->speed_mode_2500 = intel_speed_mode_2500; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return ehl_pse1_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse1_sgmii1g_info = { + .setup = ehl_pse1_sgmii1g_data, +}; + +static int tgl_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->rx_queues_to_use = 6; + plat->tx_queues_to_use = 4; + plat->clk_ptp_rate = 204800000; + plat->speed_mode_2500 = intel_speed_mode_2500; + + plat->safety_feat_cfg->tsoee = 1; + plat->safety_feat_cfg->mrxpee = 0; + plat->safety_feat_cfg->mestee = 1; + plat->safety_feat_cfg->mrxee = 1; + plat->safety_feat_cfg->mtxee = 1; + plat->safety_feat_cfg->epsi = 0; + plat->safety_feat_cfg->edpp = 0; + plat->safety_feat_cfg->prtyen = 0; + plat->safety_feat_cfg->tmouten = 0; + + return intel_mgbe_common_data(pdev, plat); +} + +static int tgl_sgmii_phy0_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info tgl_sgmii1g_phy0_info = { + .setup = tgl_sgmii_phy0_data, +}; + +static int tgl_sgmii_phy1_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 2; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info tgl_sgmii1g_phy1_info = { + .setup = tgl_sgmii_phy1_data, +}; + +static int adls_sgmii_phy0_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + + /* SerDes power up and power down are done in BIOS for ADL */ + + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info adls_sgmii1g_phy0_info = { + .setup = adls_sgmii_phy0_data, +}; + +static int adls_sgmii_phy1_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 2; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + + /* SerDes power up and power down are done in BIOS for ADL */ + + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info adls_sgmii1g_phy1_info = { + .setup = 
adls_sgmii_phy1_data, +}; +static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = { + { + .func = 6, + .phy_addr = 1, + }, +}; + +static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = { + .func = galileo_stmmac_func_data, + .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data), +}; + +static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = { + { + .func = 6, + .phy_addr = 1, + }, + { + .func = 7, + .phy_addr = 1, + }, +}; + +static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = { + .func = iot2040_stmmac_func_data, + .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data), +}; + +static const struct dmi_system_id quark_pci_dmi[] = { + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"), + }, + .driver_data = (void *)&galileo_stmmac_dmi_data, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), + }, + .driver_data = (void *)&galileo_stmmac_dmi_data, + }, + /* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040. + * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which + * has only one pci network device while other asset tags are + * for IOT2040 which has two. + */ + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), + DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, + "6ES7647-0AA00-0YA2"), + }, + .driver_data = (void *)&galileo_stmmac_dmi_data, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), + }, + .driver_data = (void *)&iot2040_stmmac_dmi_data, + }, + {} +}; + +static int quark_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int ret; + + /* Set common default data first */ + common_default_data(plat); + + /* Refuse to load the driver and register net device if MAC controller + * does not connect to any PHY interface. + */ + ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi); + if (ret < 0) { + /* Return error to the caller on DMI enabled boards. */ + if (dmi_get_system_info(DMI_BOARD_NAME)) + return ret; + + /* Galileo boards with old firmware don't support DMI. We always + * use 1 here as PHY address, so at least the first found MAC + * controller would be probed. 
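+		 * (stmmac_pci_find_phy_addr() returns -ENODEV here and
+		 * dmi_get_system_info() yields NULL without DMI data, so the
+		 * fallback address below is the only remaining option.)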
+ */ + ret = 1; + } + + plat->bus_id = pci_dev_id(pdev); + plat->phy_addr = ret; + plat->phy_interface = PHY_INTERFACE_MODE_RMII; + + plat->dma_cfg->pbl = 16; + plat->dma_cfg->pblx8 = true; + plat->dma_cfg->fixed_burst = 1; + /* AXI (TODO) */ + + return 0; +} + +static const struct stmmac_pci_info quark_info = { + .setup = quark_default_data, +}; + +static int stmmac_config_single_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n", + __func__); + return ret; + } + + res->irq = pci_irq_vector(pdev, 0); + res->wol_irq = res->irq; + plat->multi_msi_en = 0; + dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n", + __func__); + + return 0; +} + +static int stmmac_config_multi_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + int i; + + if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX || + plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) { + dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n", + __func__); + return -1; + } + + ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + dev_info(&pdev->dev, "%s: multi MSI enablement failed\n", + __func__); + return ret; + } + + /* For RX MSI */ + for (i = 0; i < plat->rx_queues_to_use; i++) { + res->rx_irq[i] = pci_irq_vector(pdev, + plat->msi_rx_base_vec + i * 2); + } + + /* For TX MSI */ + for (i = 0; i < plat->tx_queues_to_use; i++) { + res->tx_irq[i] = pci_irq_vector(pdev, + plat->msi_tx_base_vec + i * 2); + } + + if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX) + res->irq = pci_irq_vector(pdev, plat->msi_mac_vec); + if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX) + res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec); + if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX) + res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec); + if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec); + if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec); + + plat->multi_msi_en = 1; + dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__); + + return 0; +} + +/** + * intel_eth_pci_probe + * + * @pdev: pci device pointer + * @id: pointer to table of device id/id's. + * + * Description: This probing function gets called for all PCI devices which + * match the ID table and are not "owned" by other driver yet. This function + * gets passed a "struct pci_dev *" for each device whose entry in the ID table + * matches the device. The probe functions returns zero when the driver choose + * to take "ownership" of the device or an error code(-ve no) otherwise. 
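+ * In this EtherCAT-enabled variant the prepared platform data is handed
+ * to stmmac_ec_dvr_probe() (see below) rather than to the upstream
+ * stmmac_dvr_probe().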
+ */ +static int intel_eth_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data; + struct intel_priv_data *intel_priv; + struct plat_stmmacenet_data *plat; + struct stmmac_resources res; + int ret; + + intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL); + if (!intel_priv) + return -ENOMEM; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; + + plat->safety_feat_cfg = devm_kzalloc(&pdev->dev, + sizeof(*plat->safety_feat_cfg), + GFP_KERNEL); + if (!plat->safety_feat_cfg) + return -ENOMEM; + + /* Enable pci device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", + __func__); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + plat->bsp_priv = intel_priv; + intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR; + intel_priv->crossts_adj = 1; + + /* Initialize all MSI vectors to invalid so that it can be set + * according to platform data settings below. + * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX) + */ + plat->msi_mac_vec = STMMAC_MSI_VEC_MAX; + plat->msi_wol_vec = STMMAC_MSI_VEC_MAX; + plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX; + plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX; + plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX; + + ret = info->setup(pdev, plat); + if (ret) + return ret; + + memset(&res, 0, sizeof(res)); + res.addr = pcim_iomap_table(pdev)[0]; + + if (plat->eee_usecs_rate > 0) { + u32 tx_lpi_usec; + + tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1; + writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); + } + + ret = stmmac_config_multi_msi(pdev, plat, &res); + if (ret) { + ret = stmmac_config_single_msi(pdev, plat, &res); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n", + __func__); + goto err_alloc_irq; + } + } + + ret = stmmac_ec_dvr_probe(&pdev->dev, plat, &res); + if (ret) { + goto err_alloc_irq; + } + + return 0; + +err_alloc_irq: + clk_disable_unprepare(plat->stmmac_clk); + clk_unregister_fixed_rate(plat->stmmac_clk); + return ret; +} + +/** + * intel_eth_pci_remove + * + * @pdev: pci device pointer + * Description: this function calls the main to free the net resources + * and releases the PCI resources. 
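+ * The EtherCAT-enabled variant tears the device down through
+ * stmmac_ec_dvr_remove() before releasing the fixed-rate clock.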
+ */ +static void intel_eth_pci_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = dev_get_drvdata(&pdev->dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_ec_dvr_remove(&pdev->dev); + + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_unregister_fixed_rate(priv->plat->stmmac_clk); +} + +static int __maybe_unused intel_eth_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +static int __maybe_unused intel_eth_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend, + intel_eth_pci_resume); + +#define PCI_DEVICE_ID_INTEL_QUARK 0x0937 +#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32 +/* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC + * which are named PSE0 and PSE1 + */ +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0 +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2 +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2 +#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad +#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac +#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G 0x51ac + +static const struct pci_device_id intel_eth_pci_id_table[] = { + { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) }, + {} +}; +//MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); + +static struct pci_driver intel_eth_pci_driver = { + .name = "ec_intel-eth-pci", + .id_table = intel_eth_pci_id_table, + .probe = intel_eth_pci_probe, + .remove = intel_eth_pci_remove, + .driver = { + .pm = &intel_eth_pm_ops, + }, +}; + +static 
int __init dwmac_init(void) +{ + int ret; + ret = stmmac_init(); + if (ret) { + return ret; + } + ret = pci_register_driver(&intel_eth_pci_driver); + if (ret) { + stmmac_exit(); + } + return ret; +} + +static void __exit dwmac_exit(void) +{ + pci_unregister_driver(&intel_eth_pci_driver); + stmmac_exit(); +} + +module_init(dwmac_init); +module_exit(dwmac_exit); + +MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver (EtherCAT-enabled)"); +MODULE_AUTHOR("Voon Weifeng "); +MODULE_LICENSE("GPL v2"); diff --git a/devices/stmmac/dwmac-intel-6.1-ethercat.h b/devices/stmmac/dwmac-intel-6.1-ethercat.h new file mode 100644 index 00000000..0a379874 --- /dev/null +++ b/devices/stmmac/dwmac-intel-6.1-ethercat.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation + * DWMAC Intel header file + */ + +#ifndef __DWMAC_INTEL_H__ +#define __DWMAC_INTEL_H__ + +#define POLL_DELAY_US 8 + +/* SERDES Register */ +#define SERDES_GCR 0x0 /* Global Conguration */ +#define SERDES_GSR0 0x5 /* Global Status Reg0 */ +#define SERDES_GCR0 0xb /* Global Configuration Reg0 */ + +/* SERDES defines */ +#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */ +#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */ +#define SERDES_RST BIT(2) /* Serdes Reset */ +#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/ +#define SERDES_RATE_MASK GENMASK(9, 8) +#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */ +#define SERDES_LINK_MODE_MASK GENMASK(2, 1) +#define SERDES_LINK_MODE_SHIFT 1 +#define SERDES_PWR_ST_SHIFT 4 +#define SERDES_PWR_ST_P0 0x0 +#define SERDES_PWR_ST_P3 0x3 +#define SERDES_LINK_MODE_2G5 0x3 +#define SERSED_LINK_MODE_1G 0x2 +#define SERDES_PCLK_37p5MHZ 0x0 +#define SERDES_PCLK_70MHZ 0x1 +#define SERDES_RATE_PCIE_GEN1 0x0 +#define SERDES_RATE_PCIE_GEN2 0x1 +#define SERDES_RATE_PCIE_SHIFT 8 +#define SERDES_PCLK_SHIFT 12 + +#define INTEL_MGBE_ADHOC_ADDR 0x15 +#define INTEL_MGBE_XPCS_ADDR 0x16 + +/* Cross-timestamping defines */ +#define ART_CPUID_LEAF 0x15 +#define EHL_PSE_ART_MHZ 19200000 + +/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */ +#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_256MHZ (0) +#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_200MHZ (0) + +#endif /* __DWMAC_INTEL_H__ */ diff --git a/devices/stmmac/dwmac-intel-6.1-orig.c b/devices/stmmac/dwmac-intel-6.1-orig.c new file mode 100644 index 00000000..ab9f876b --- /dev/null +++ b/devices/stmmac/dwmac-intel-6.1-orig.c @@ -0,0 +1,1241 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation + */ + +#include +#include +#include +#include "dwmac-intel.h" +#include "dwmac4.h" +#include "stmmac.h" +#include "stmmac_ptp.h" + +struct intel_priv_data { + int mdio_adhoc_addr; /* mdio address for serdes & etc */ + unsigned long crossts_adj; + bool is_pse; +}; + +/* This struct is used to associate PCI Function of MAC controller on a board, + * discovered via DMI, with the address of PHY connected to the MAC. The + * negative value of the address means that MAC controller is not connected + * with PHY. 
+ */ +struct stmmac_pci_func_data { + unsigned int func; + int phy_addr; +}; + +struct stmmac_pci_dmi_data { + const struct stmmac_pci_func_data *func; + size_t nfuncs; +}; + +struct stmmac_pci_info { + int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); +}; + +static int stmmac_pci_find_phy_addr(struct pci_dev *pdev, + const struct dmi_system_id *dmi_list) +{ + const struct stmmac_pci_func_data *func_data; + const struct stmmac_pci_dmi_data *dmi_data; + const struct dmi_system_id *dmi_id; + int func = PCI_FUNC(pdev->devfn); + size_t n; + + dmi_id = dmi_first_match(dmi_list); + if (!dmi_id) + return -ENODEV; + + dmi_data = dmi_id->driver_data; + func_data = dmi_data->func; + + for (n = 0; n < dmi_data->nfuncs; n++, func_data++) + if (func_data->func == func) + return func_data->phy_addr; + + return -ENODEV; +} + +static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 mask, u32 val) +{ + unsigned int retries = 10; + int val_rd; + + do { + val_rd = mdiobus_read(priv->mii, phyaddr, phyreg); + if ((val_rd & mask) == (val & mask)) + return 0; + udelay(POLL_DELAY_US); + } while (--retries); + + return -ETIMEDOUT; +} + +static int intel_serdes_powerup(struct net_device *ndev, void *priv_data) +{ + struct intel_priv_data *intel_priv = priv_data; + struct stmmac_priv *priv = netdev_priv(ndev); + int serdes_phy_addr = 0; + u32 data = 0; + + if (!intel_priv->mdio_adhoc_addr) + return 0; + + serdes_phy_addr = intel_priv->mdio_adhoc_addr; + + /* Set the serdes rate and the PCLK rate */ + data = mdiobus_read(priv->mii, serdes_phy_addr, + SERDES_GCR0); + + data &= ~SERDES_RATE_MASK; + data &= ~SERDES_PCLK_MASK; + + if (priv->plat->max_speed == 2500) + data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT | + SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT; + else + data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT | + SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT; + + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* assert clk_req */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data |= SERDES_PLL_CLK; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for clk_ack assertion */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PLL_CLK, + SERDES_PLL_CLK); + + if (data) { + dev_err(priv->device, "Serdes PLL clk request timeout\n"); + return data; + } + + /* assert lane reset */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data |= SERDES_RST; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for assert lane reset reflection */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_RST, + SERDES_RST); + + if (data) { + dev_err(priv->device, "Serdes assert lane reset timeout\n"); + return data; + } + + /* move power state to P0 */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + + data &= ~SERDES_PWR_ST_MASK; + data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT; + + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* Check for P0 state */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PWR_ST_MASK, + SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT); + + if (data) { + dev_err(priv->device, "Serdes power state P0 timeout.\n"); + return data; + } + + /* PSE only - ungate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + 0, SERDES_PHY_RX_CLK); + + return 0; +} + +static void intel_serdes_powerdown(struct 
net_device *ndev, void *intel_data) +{ + struct intel_priv_data *intel_priv = intel_data; + struct stmmac_priv *priv = netdev_priv(ndev); + int serdes_phy_addr = 0; + u32 data = 0; + + if (!intel_priv->mdio_adhoc_addr) + return; + + serdes_phy_addr = intel_priv->mdio_adhoc_addr; + + /* PSE only - gate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + SERDES_PHY_RX_CLK, 0); + + /* move power state to P3 */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + + data &= ~SERDES_PWR_ST_MASK; + data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT; + + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* Check for P3 state */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PWR_ST_MASK, + SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT); + + if (data) { + dev_err(priv->device, "Serdes power state P3 timeout\n"); + return; + } + + /* de-assert clk_req */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data &= ~SERDES_PLL_CLK; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for clk_ack de-assert */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_PLL_CLK, + (u32)~SERDES_PLL_CLK); + + if (data) { + dev_err(priv->device, "Serdes PLL clk de-assert timeout\n"); + return; + } + + /* de-assert lane reset */ + data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); + data &= ~SERDES_RST; + mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data); + + /* check for de-assert lane reset reflection */ + data = serdes_status_poll(priv, serdes_phy_addr, + SERDES_GSR0, + SERDES_RST, + (u32)~SERDES_RST); + + if (data) { + dev_err(priv->device, "Serdes de-assert lane reset timeout\n"); + return; + } +} + +static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data) +{ + struct intel_priv_data *intel_priv = intel_data; + struct stmmac_priv *priv = netdev_priv(ndev); + int serdes_phy_addr = 0; + u32 data = 0; + + serdes_phy_addr = intel_priv->mdio_adhoc_addr; + + /* Determine the link speed mode: 2.5Gbps/1Gbps */ + data = mdiobus_read(priv->mii, serdes_phy_addr, + SERDES_GCR); + + if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) == + SERDES_LINK_MODE_2G5) { + dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n"); + priv->plat->max_speed = 2500; + priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; + priv->plat->mdio_bus_data->xpcs_an_inband = false; + } else { + priv->plat->max_speed = 1000; + } +} + +/* Program PTP Clock Frequency for different variant of + * Intel mGBE that has slightly different GPO mapping + */ +static void intel_mgbe_ptp_clk_freq_config(void *npriv) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)npriv; + struct intel_priv_data *intel_priv; + u32 gpio_value; + + intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv; + + gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS); + + if (intel_priv->is_pse) { + /* For PSE GbE, use 200MHz */ + gpio_value &= ~PSE_PTP_CLK_FREQ_MASK; + gpio_value |= PSE_PTP_CLK_FREQ_200MHZ; + } else { + /* For PCH GbE, use 200MHz */ + gpio_value &= ~PCH_PTP_CLK_FREQ_MASK; + gpio_value |= PCH_PTP_CLK_FREQ_200MHZ; + } + + writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS); +} + +static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr, + u64 *art_time) +{ + u64 ns; + + ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2); + ns <<= 
GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0); + + *art_time = ns; +} + +static int stmmac_cross_ts_isr(struct stmmac_priv *priv) +{ + return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE); +} + +static int intel_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct intel_priv_data *intel_priv; + + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 art_time = 0; + u64 ptp_time = 0; + u32 num_snapshot; + u32 gpio_value; + u32 acr_value; + int i; + + if (!boot_cpu_has(X86_FEATURE_ART)) + return -EOPNOTSUPP; + + intel_priv = priv->plat->bsp_priv; + + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. + */ + if (priv->plat->ext_snapshot_en) + return -EBUSY; + + priv->plat->int_snapshot_en = 1; + + mutex_lock(&priv->aux_ts_lock); + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + mutex_unlock(&priv->aux_ts_lock); + priv->plat->int_snapshot_en = 0; + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); + + /* Trigger Internal snapshot signal + * Create a rising edge by just toggle the GPO1 to low + * and back to high. + */ + gpio_value = readl(ioaddr + GMAC_GPIO_STATUS); + gpio_value &= ~GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + gpio_value |= GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + + /* Time sync done Indication - Interrupt method */ + if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait, + stmmac_cross_ts_isr(priv), + HZ / 100)) { + priv->plat->int_snapshot_en = 0; + return -ETIMEDOUT; + } + + num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & + GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time); + *system = convert_art_to_tsc(art_time); + } + + system->cycles *= intel_priv->crossts_adj; + priv->plat->int_snapshot_en = 0; + + return 0; +} + +static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv, + int base) +{ + if (boot_cpu_has(X86_FEATURE_ART)) { + unsigned int art_freq; + + /* On systems that support ART, ART frequency can be obtained + * from ECX register of CPUID leaf (0x15). 
+ */ + art_freq = cpuid_ecx(ART_CPUID_LEAF); + do_div(art_freq, base); + intel_priv->crossts_adj = art_freq; + } +} + +static void common_default_data(struct plat_stmmacenet_data *plat) +{ + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; + + plat->mdio_bus_data->needs_reset = true; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[0].use_prio = false; + plat->rx_queues_cfg[0].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[0].pkt_route = 0x0; +} + +static int intel_mgbe_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct fwnode_handle *fwnode; + char clk_name[20]; + int ret; + int i; + + plat->pdev = pdev; + plat->phy_addr = -1; + plat->clk_csr = 5; + plat->has_gmac = 0; + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 0; + plat->tso_en = 1; + plat->sph_disable = 1; + + /* Multiplying factor to the clk_eee_i clock time + * period to make it closer to 100 ns. This value + * should be programmed such that the clk_eee_time_period * + * (MULT_FACT_100NS + 1) should be within 80 ns to 120 ns + * clk_eee frequency is 19.2Mhz + * clk_eee_time_period is 52ns + * 52ns * (1 + 1) = 104ns + * MULT_FACT_100NS = 1 + */ + plat->mult_fact_100ns = 1; + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + + for (i = 0; i < plat->rx_queues_to_use; i++) { + plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->rx_queues_cfg[i].chan = i; + + /* Disable Priority config by default */ + plat->rx_queues_cfg[i].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[i].pkt_route = 0x0; + } + + for (i = 0; i < plat->tx_queues_to_use; i++) { + plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[i].use_prio = false; + /* Default TX Q0 to use TSO and rest TXQ for TBS */ + if (i > 0) + plat->tx_queues_cfg[i].tbs_en = 1; + } + + /* FIFO size is 4096 bytes for 1 tx/rx queue */ + plat->tx_fifo_size = plat->tx_queues_to_use * 4096; + plat->rx_fifo_size = plat->rx_queues_to_use * 4096; + + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; + plat->tx_queues_cfg[0].weight = 0x09; + plat->tx_queues_cfg[1].weight = 0x0A; + plat->tx_queues_cfg[2].weight = 0x0B; + plat->tx_queues_cfg[3].weight = 0x0C; + plat->tx_queues_cfg[4].weight = 0x0D; + plat->tx_queues_cfg[5].weight = 0x0E; + plat->tx_queues_cfg[6].weight = 0x0F; + plat->tx_queues_cfg[7].weight = 0x10; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + plat->dma_cfg->fixed_burst = 0; + plat->dma_cfg->mixed_burst = 0; + plat->dma_cfg->aal = 0; + plat->dma_cfg->dche = true; + + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), + GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->axi->axi_lpi_en = 0; + plat->axi->axi_xit_frm = 0; + plat->axi->axi_wr_osr_lmt = 1; + plat->axi->axi_rd_osr_lmt = 1; + plat->axi->axi_blen[0] = 4; + plat->axi->axi_blen[1] = 8; + plat->axi->axi_blen[2] = 16; + + plat->ptp_max_adj = plat->clk_ptp_rate; + plat->eee_usecs_rate = plat->clk_ptp_rate; + + /* Set system clock */ + 
sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev)); + + plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev, + clk_name, NULL, 0, + plat->clk_ptp_rate); + + if (IS_ERR(plat->stmmac_clk)) { + dev_warn(&pdev->dev, "Fail to register stmmac-clk\n"); + plat->stmmac_clk = NULL; + } + + ret = clk_prepare_enable(plat->stmmac_clk); + if (ret) { + clk_unregister_fixed_rate(plat->stmmac_clk); + return ret; + } + + plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + plat->vlan_fail_q_en = true; + + /* Use the last Rx queue */ + plat->vlan_fail_q = plat->rx_queues_to_use - 1; + + /* For fixed-link setup, we allow phy-mode setting */ + fwnode = dev_fwnode(&pdev->dev); + if (fwnode) { + int phy_mode; + + /* "phy-mode" setting is optional. If it is set, + * we allow either sgmii or 1000base-x for now. + */ + phy_mode = fwnode_get_phy_mode(fwnode); + if (phy_mode >= 0) { + if (phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_1000BASEX) + plat->phy_interface = phy_mode; + else + dev_warn(&pdev->dev, "Invalid phy-mode\n"); + } + } + + /* Intel mgbe SGMII interface uses pcs-xcps */ + if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII || + plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { + plat->mdio_bus_data->has_xpcs = true; + plat->mdio_bus_data->xpcs_an_inband = true; + } + + /* For fixed-link setup, we clear xpcs_an_inband */ + if (fwnode) { + struct fwnode_handle *fixed_node; + + fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); + if (fixed_node) + plat->mdio_bus_data->xpcs_an_inband = false; + + fwnode_handle_put(fixed_node); + } + + /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ + plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR; + plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; + + plat->int_snapshot_num = AUX_SNAPSHOT1; + plat->ext_snapshot_num = AUX_SNAPSHOT0; + + plat->crosststamp = intel_crosststamp; + plat->int_snapshot_en = 0; + + /* Setup MSI vector offset specific to Intel mGbE controller */ + plat->msi_mac_vec = 29; + plat->msi_lpi_vec = 28; + plat->msi_sfty_ce_vec = 27; + plat->msi_sfty_ue_vec = 26; + plat->msi_rx_base_vec = 0; + plat->msi_tx_base_vec = 1; + + return 0; +} + +static int ehl_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->rx_queues_to_use = 8; + plat->tx_queues_to_use = 8; + plat->use_phy_wol = 1; + + plat->safety_feat_cfg->tsoee = 1; + plat->safety_feat_cfg->mrxpee = 1; + plat->safety_feat_cfg->mestee = 1; + plat->safety_feat_cfg->mrxee = 1; + plat->safety_feat_cfg->mtxee = 1; + plat->safety_feat_cfg->epsi = 0; + plat->safety_feat_cfg->edpp = 0; + plat->safety_feat_cfg->prtyen = 0; + plat->safety_feat_cfg->tmouten = 0; + + return intel_mgbe_common_data(pdev, plat); +} + +static int ehl_sgmii_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->speed_mode_2500 = intel_speed_mode_2500; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + + plat->clk_ptp_rate = 204800000; + + return ehl_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_sgmii1g_info = { + .setup = ehl_sgmii_data, +}; + +static int ehl_rgmii_data(struct pci_dev *pdev, + 
struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_RGMII; + + plat->clk_ptp_rate = 204800000; + + return ehl_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_rgmii1g_info = { + .setup = ehl_rgmii_data, +}; + +static int ehl_pse0_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; + plat->bus_id = 2; + plat->host_dma_width = 32; + + plat->clk_ptp_rate = 200000000; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + + return ehl_common_data(pdev, plat); +} + +static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + return ehl_pse0_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse0_rgmii1g_info = { + .setup = ehl_pse0_rgmii1g_data, +}; + +static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->speed_mode_2500 = intel_speed_mode_2500; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return ehl_pse0_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse0_sgmii1g_info = { + .setup = ehl_pse0_sgmii1g_data, +}; + +static int ehl_pse1_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; + plat->bus_id = 3; + plat->host_dma_width = 32; + + plat->clk_ptp_rate = 200000000; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + + return ehl_common_data(pdev, plat); +} + +static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + return ehl_pse1_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse1_rgmii1g_info = { + .setup = ehl_pse1_rgmii1g_data, +}; + +static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->speed_mode_2500 = intel_speed_mode_2500; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return ehl_pse1_common_data(pdev, plat); +} + +static struct stmmac_pci_info ehl_pse1_sgmii1g_info = { + .setup = ehl_pse1_sgmii1g_data, +}; + +static int tgl_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->rx_queues_to_use = 6; + plat->tx_queues_to_use = 4; + plat->clk_ptp_rate = 204800000; + plat->speed_mode_2500 = intel_speed_mode_2500; + + plat->safety_feat_cfg->tsoee = 1; + plat->safety_feat_cfg->mrxpee = 0; + plat->safety_feat_cfg->mestee = 1; + plat->safety_feat_cfg->mrxee = 1; + plat->safety_feat_cfg->mtxee = 1; + plat->safety_feat_cfg->epsi = 0; + plat->safety_feat_cfg->edpp = 0; + plat->safety_feat_cfg->prtyen = 0; + plat->safety_feat_cfg->tmouten = 0; + + return intel_mgbe_common_data(pdev, plat); +} + +static int tgl_sgmii_phy0_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info tgl_sgmii1g_phy0_info = { + .setup = tgl_sgmii_phy0_data, +}; + +static int tgl_sgmii_phy1_data(struct pci_dev *pdev, + 
struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 2; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info tgl_sgmii1g_phy1_info = { + .setup = tgl_sgmii_phy1_data, +}; + +static int adls_sgmii_phy0_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + + /* SerDes power up and power down are done in BIOS for ADL */ + + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info adls_sgmii1g_phy0_info = { + .setup = adls_sgmii_phy0_data, +}; + +static int adls_sgmii_phy1_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 2; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + + /* SerDes power up and power down are done in BIOS for ADL */ + + return tgl_common_data(pdev, plat); +} + +static struct stmmac_pci_info adls_sgmii1g_phy1_info = { + .setup = adls_sgmii_phy1_data, +}; +static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = { + { + .func = 6, + .phy_addr = 1, + }, +}; + +static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = { + .func = galileo_stmmac_func_data, + .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data), +}; + +static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = { + { + .func = 6, + .phy_addr = 1, + }, + { + .func = 7, + .phy_addr = 1, + }, +}; + +static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = { + .func = iot2040_stmmac_func_data, + .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data), +}; + +static const struct dmi_system_id quark_pci_dmi[] = { + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"), + }, + .driver_data = (void *)&galileo_stmmac_dmi_data, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), + }, + .driver_data = (void *)&galileo_stmmac_dmi_data, + }, + /* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040. + * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which + * has only one pci network device while other asset tags are + * for IOT2040 which has two. + */ + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), + DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, + "6ES7647-0AA00-0YA2"), + }, + .driver_data = (void *)&galileo_stmmac_dmi_data, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), + }, + .driver_data = (void *)&iot2040_stmmac_dmi_data, + }, + {} +}; + +static int quark_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int ret; + + /* Set common default data first */ + common_default_data(plat); + + /* Refuse to load the driver and register net device if MAC controller + * does not connect to any PHY interface. + */ + ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi); + if (ret < 0) { + /* Return error to the caller on DMI enabled boards. */ + if (dmi_get_system_info(DMI_BOARD_NAME)) + return ret; + + /* Galileo boards with old firmware don't support DMI. We always + * use 1 here as PHY address, so at least the first found MAC + * controller would be probed. 
+ */ + ret = 1; + } + + plat->bus_id = pci_dev_id(pdev); + plat->phy_addr = ret; + plat->phy_interface = PHY_INTERFACE_MODE_RMII; + + plat->dma_cfg->pbl = 16; + plat->dma_cfg->pblx8 = true; + plat->dma_cfg->fixed_burst = 1; + /* AXI (TODO) */ + + return 0; +} + +static const struct stmmac_pci_info quark_info = { + .setup = quark_default_data, +}; + +static int stmmac_config_single_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n", + __func__); + return ret; + } + + res->irq = pci_irq_vector(pdev, 0); + res->wol_irq = res->irq; + plat->multi_msi_en = 0; + dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n", + __func__); + + return 0; +} + +static int stmmac_config_multi_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + int i; + + if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX || + plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) { + dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n", + __func__); + return -1; + } + + ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + dev_info(&pdev->dev, "%s: multi MSI enablement failed\n", + __func__); + return ret; + } + + /* For RX MSI */ + for (i = 0; i < plat->rx_queues_to_use; i++) { + res->rx_irq[i] = pci_irq_vector(pdev, + plat->msi_rx_base_vec + i * 2); + } + + /* For TX MSI */ + for (i = 0; i < plat->tx_queues_to_use; i++) { + res->tx_irq[i] = pci_irq_vector(pdev, + plat->msi_tx_base_vec + i * 2); + } + + if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX) + res->irq = pci_irq_vector(pdev, plat->msi_mac_vec); + if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX) + res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec); + if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX) + res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec); + if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec); + if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec); + + plat->multi_msi_en = 1; + dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__); + + return 0; +} + +/** + * intel_eth_pci_probe + * + * @pdev: pci device pointer + * @id: pointer to table of device id/id's. + * + * Description: This probing function gets called for all PCI devices which + * match the ID table and are not "owned" by other driver yet. This function + * gets passed a "struct pci_dev *" for each device whose entry in the ID table + * matches the device. The probe functions returns zero when the driver choose + * to take "ownership" of the device or an error code(-ve no) otherwise. 
+ */ +static int intel_eth_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data; + struct intel_priv_data *intel_priv; + struct plat_stmmacenet_data *plat; + struct stmmac_resources res; + int ret; + + intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL); + if (!intel_priv) + return -ENOMEM; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; + + plat->safety_feat_cfg = devm_kzalloc(&pdev->dev, + sizeof(*plat->safety_feat_cfg), + GFP_KERNEL); + if (!plat->safety_feat_cfg) + return -ENOMEM; + + /* Enable pci device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", + __func__); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + plat->bsp_priv = intel_priv; + intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR; + intel_priv->crossts_adj = 1; + + /* Initialize all MSI vectors to invalid so that it can be set + * according to platform data settings below. + * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX) + */ + plat->msi_mac_vec = STMMAC_MSI_VEC_MAX; + plat->msi_wol_vec = STMMAC_MSI_VEC_MAX; + plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX; + plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX; + plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX; + + ret = info->setup(pdev, plat); + if (ret) + return ret; + + memset(&res, 0, sizeof(res)); + res.addr = pcim_iomap_table(pdev)[0]; + + if (plat->eee_usecs_rate > 0) { + u32 tx_lpi_usec; + + tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1; + writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); + } + + ret = stmmac_config_multi_msi(pdev, plat, &res); + if (ret) { + ret = stmmac_config_single_msi(pdev, plat, &res); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n", + __func__); + goto err_alloc_irq; + } + } + + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); + if (ret) { + goto err_alloc_irq; + } + + return 0; + +err_alloc_irq: + clk_disable_unprepare(plat->stmmac_clk); + clk_unregister_fixed_rate(plat->stmmac_clk); + return ret; +} + +/** + * intel_eth_pci_remove + * + * @pdev: pci device pointer + * Description: this function calls the main to free the net resources + * and releases the PCI resources. 
+ */ +static void intel_eth_pci_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = dev_get_drvdata(&pdev->dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_dvr_remove(&pdev->dev); + + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_unregister_fixed_rate(priv->plat->stmmac_clk); +} + +static int __maybe_unused intel_eth_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +static int __maybe_unused intel_eth_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend, + intel_eth_pci_resume); + +#define PCI_DEVICE_ID_INTEL_QUARK 0x0937 +#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32 +/* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC + * which are named PSE0 and PSE1 + */ +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0 +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2 +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2 +#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad +#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac +#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G 0x51ac + +static const struct pci_device_id intel_eth_pci_id_table[] = { + { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) }, + {} +}; +MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); + +static struct pci_driver intel_eth_pci_driver = { + .name = "intel-eth-pci", + .id_table = intel_eth_pci_id_table, + .probe = intel_eth_pci_probe, + .remove = intel_eth_pci_remove, + .driver = { + .pm = &intel_eth_pm_ops, + }, +}; + 
+module_pci_driver(intel_eth_pci_driver); + +MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver"); +MODULE_AUTHOR("Voon Weifeng "); +MODULE_LICENSE("GPL v2"); diff --git a/devices/stmmac/dwmac-intel-6.1-orig.h b/devices/stmmac/dwmac-intel-6.1-orig.h new file mode 100644 index 00000000..0a379874 --- /dev/null +++ b/devices/stmmac/dwmac-intel-6.1-orig.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation + * DWMAC Intel header file + */ + +#ifndef __DWMAC_INTEL_H__ +#define __DWMAC_INTEL_H__ + +#define POLL_DELAY_US 8 + +/* SERDES Register */ +#define SERDES_GCR 0x0 /* Global Conguration */ +#define SERDES_GSR0 0x5 /* Global Status Reg0 */ +#define SERDES_GCR0 0xb /* Global Configuration Reg0 */ + +/* SERDES defines */ +#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */ +#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */ +#define SERDES_RST BIT(2) /* Serdes Reset */ +#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/ +#define SERDES_RATE_MASK GENMASK(9, 8) +#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */ +#define SERDES_LINK_MODE_MASK GENMASK(2, 1) +#define SERDES_LINK_MODE_SHIFT 1 +#define SERDES_PWR_ST_SHIFT 4 +#define SERDES_PWR_ST_P0 0x0 +#define SERDES_PWR_ST_P3 0x3 +#define SERDES_LINK_MODE_2G5 0x3 +#define SERSED_LINK_MODE_1G 0x2 +#define SERDES_PCLK_37p5MHZ 0x0 +#define SERDES_PCLK_70MHZ 0x1 +#define SERDES_RATE_PCIE_GEN1 0x0 +#define SERDES_RATE_PCIE_GEN2 0x1 +#define SERDES_RATE_PCIE_SHIFT 8 +#define SERDES_PCLK_SHIFT 12 + +#define INTEL_MGBE_ADHOC_ADDR 0x15 +#define INTEL_MGBE_XPCS_ADDR 0x16 + +/* Cross-timestamping defines */ +#define ART_CPUID_LEAF 0x15 +#define EHL_PSE_ART_MHZ 19200000 + +/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */ +#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_256MHZ (0) +#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_200MHZ (0) + +#endif /* __DWMAC_INTEL_H__ */ diff --git a/devices/stmmac/dwmac100-6.1-ethercat.h b/devices/stmmac/dwmac100-6.1-ethercat.h new file mode 100644 index 00000000..7df42f05 --- /dev/null +++ b/devices/stmmac/dwmac100-6.1-ethercat.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + MAC 10/100 Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DWMAC100_H__ +#define __DWMAC100_H__ + +#include +#include "common-6.1-ethercat.h" + +/*---------------------------------------------------------------------------- + * MAC BLOCK defines + *---------------------------------------------------------------------------*/ +/* MAC CSR offset */ +#define MAC_CONTROL 0x00000000 /* MAC Control */ +#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */ +#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */ +#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */ +#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */ +#define MAC_MII_ADDR 0x00000014 /* MII Address */ +#define MAC_MII_DATA 0x00000018 /* MII Data */ +#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */ +#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */ +#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */ + +/* MAC CTRL defines */ +#define 
MAC_CONTROL_RA 0x80000000 /* Receive All Mode */ +#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */ +#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */ +#define MAC_CONTROL_PS 0x08000000 /* Port Select */ +#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */ +#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */ +#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */ +#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */ +#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */ +#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */ +#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */ +#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */ +#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */ +#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */ +#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */ +#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */ +#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */ +#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */ +#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */ +#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */ +#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */ +#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */ +#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */ +#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define MAC_CORE_INIT (MAC_CONTROL_HBD) + +/* MAC FLOW CTRL defines */ +#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define MAC_FLOW_CTRL_PT_SHIFT 16 +#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */ +#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */ +#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... 
*/ + +/* MII ADDR defines */ +#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ + +/*---------------------------------------------------------------------------- + * DMA BLOCK defines + *---------------------------------------------------------------------------*/ + +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */ +#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ +#define DMA_BUS_MODE_DEFAULT 0x00000000 + +/* DMA Control register defines */ +#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */ + +/* Transmit Threshold Control */ +enum ttc_control { + DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */ + DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */ + DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */ + DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */ + DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */ + DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */ + DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */ + DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */ +}; + +/* STMAC110 DMA Missed Frame Counter register defines */ +#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Overflow */ +#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ +#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ +#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ + +extern const struct stmmac_dma_ops dwmac100_dma_ops; + +#endif /* __DWMAC100_H__ */ diff --git a/devices/stmmac/dwmac100-6.1-orig.h b/devices/stmmac/dwmac100-6.1-orig.h new file mode 100644 index 00000000..7ab791c8 --- /dev/null +++ b/devices/stmmac/dwmac100-6.1-orig.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + MAC 10/100 Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DWMAC100_H__ +#define __DWMAC100_H__ + +#include +#include "common.h" + +/*---------------------------------------------------------------------------- + * MAC BLOCK defines + *---------------------------------------------------------------------------*/ +/* MAC CSR offset */ +#define MAC_CONTROL 0x00000000 /* MAC Control */ +#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */ +#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */ +#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */ +#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */ +#define MAC_MII_ADDR 0x00000014 /* MII Address */ +#define MAC_MII_DATA 0x00000018 /* MII Data */ +#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */ +#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */ +#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */ + +/* MAC CTRL defines */ +#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */ +#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */ +#define 
MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */ +#define MAC_CONTROL_PS 0x08000000 /* Port Select */ +#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */ +#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */ +#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */ +#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */ +#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */ +#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */ +#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */ +#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */ +#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */ +#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */ +#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */ +#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */ +#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */ +#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */ +#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */ +#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */ +#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */ +#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */ +#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */ +#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define MAC_CORE_INIT (MAC_CONTROL_HBD) + +/* MAC FLOW CTRL defines */ +#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define MAC_FLOW_CTRL_PT_SHIFT 16 +#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */ +#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */ +#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... 
*/ + +/* MII ADDR defines */ +#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ + +/*---------------------------------------------------------------------------- + * DMA BLOCK defines + *---------------------------------------------------------------------------*/ + +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */ +#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ +#define DMA_BUS_MODE_DEFAULT 0x00000000 + +/* DMA Control register defines */ +#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */ + +/* Transmit Threshold Control */ +enum ttc_control { + DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */ + DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */ + DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */ + DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */ + DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */ + DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */ + DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */ + DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */ +}; + +/* STMAC110 DMA Missed Frame Counter register defines */ +#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Overflow */ +#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ +#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ +#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ + +extern const struct stmmac_dma_ops dwmac100_dma_ops; + +#endif /* __DWMAC100_H__ */ diff --git a/devices/stmmac/dwmac1000-6.1-ethercat.h b/devices/stmmac/dwmac1000-6.1-ethercat.h new file mode 100644 index 00000000..5219e0ef --- /dev/null +++ b/devices/stmmac/dwmac1000-6.1-ethercat.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ +#ifndef __DWMAC1000_H__ +#define __DWMAC1000_H__ + +#include +#include "common-6.1-ethercat.h" + +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ +#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ +#define GMAC_DEBUG 0x00000024 /* GMAC debug register */ +#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ + +#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ +#define GMAC_INT_STATUS_PMT BIT(3) +#define GMAC_INT_STATUS_MMCIS BIT(4) +#define GMAC_INT_STATUS_MMCRIS BIT(5) +#define GMAC_INT_STATUS_MMCTIS BIT(6) +#define GMAC_INT_STATUS_MMCCSUM BIT(7) +#define GMAC_INT_STATUS_TSTAMP BIT(9) +#define 
GMAC_INT_STATUS_LPIIS BIT(10) + +/* interrupt mask register */ +#define GMAC_INT_MASK 0x0000003c +#define GMAC_INT_DISABLE_RGMII BIT(0) +#define GMAC_INT_DISABLE_PCSLINK BIT(1) +#define GMAC_INT_DISABLE_PCSAN BIT(2) +#define GMAC_INT_DISABLE_PMT BIT(3) +#define GMAC_INT_DISABLE_TIMESTAMP BIT(9) +#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \ + GMAC_INT_DISABLE_PCSLINK | \ + GMAC_INT_DISABLE_PCSAN) +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \ + GMAC_INT_DISABLE_PCS) + +/* PMT Control and Status */ +#define GMAC_PMT 0x0000002c +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* Energy Efficient Ethernet (EEE) + * + * LPI status, timer and control register offset + */ +#define LPI_CTRL_STATUS 0x0030 +#define LPI_TIMER_CTRL 0x0034 + +/* LPI control and status defines */ +#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */ +#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */ +#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */ +#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */ +#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */ +#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */ +#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */ +#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */ +#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */ +#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ + +/* GMAC HW ADDR regs */ +#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \ + 0x00000040 + (reg * 8)) +#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \ + 0x00000044 + (reg * 8)) +#define GMAC_MAX_PERFECT_ADDRESSES 1 + +#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */ +#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */ + +/* SGMII/RGMII status register */ +#define GMAC_RGSMIIIS_LNKMODE BIT(0) +#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1) +#define GMAC_RGSMIIIS_SPEED_SHIFT 1 +#define GMAC_RGSMIIIS_LNKSTS BIT(3) +#define GMAC_RGSMIIIS_JABTO BIT(4) +#define GMAC_RGSMIIIS_FALSECARDET BIT(5) +#define GMAC_RGSMIIIS_SMIDRXS BIT(16) +/* LNKMOD */ +#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_RGSMIIIS_SPEED_125 0x2 +#define GMAC_RGSMIIIS_SPEED_25 0x1 +#define GMAC_RGSMIIIS_SPEED_2_5 0x0 + +/* GMAC Configuration defines */ +#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ +#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. 
in RGMII/SGMII */ +#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ +#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */ +#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */ +#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ +enum inter_frame_gap { + GMAC_CONTROL_IFG_88 = 0x00040000, + GMAC_CONTROL_IFG_80 = 0x00020000, + GMAC_CONTROL_IFG_40 = 0x000e0000, +}; +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \ + GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) + +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ + +/* DEBUG Register defines */ +/* MTL TxStatus FIFO */ +#define GMAC_DEBUG_TXSTSFSTS BIT(25) /* MTL TxStatus FIFO Full Status */ +#define GMAC_DEBUG_TXFSTS BIT(24) /* MTL Tx FIFO Not Empty Status */ +#define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */ +/* MTL Tx FIFO Read Controller Status */ +#define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20) +#define GMAC_DEBUG_TRCSTS_SHIFT 20 +#define GMAC_DEBUG_TRCSTS_IDLE 0 +#define GMAC_DEBUG_TRCSTS_READ 1 +#define GMAC_DEBUG_TRCSTS_TXW 2 +#define GMAC_DEBUG_TRCSTS_WRITE 3 +#define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */ +/* MAC Transmit Frame Controller Status */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +/* MAC GMII or MII Transmit Protocol Engine Status */ +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */ +#define GMAC_DEBUG_RXFSTS_SHIFT 8 +#define GMAC_DEBUG_RXFSTS_EMPTY 0 +#define GMAC_DEBUG_RXFSTS_BT 1 +#define GMAC_DEBUG_RXFSTS_AT 2 +#define GMAC_DEBUG_RXFSTS_FULL 3 +#define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */ +#define GMAC_DEBUG_RRCSTS_SHIFT 5 +#define GMAC_DEBUG_RRCSTS_IDLE 0 +#define GMAC_DEBUG_RRCSTS_RDATA 1 +#define GMAC_DEBUG_RRCSTS_RSTAT 2 +#define GMAC_DEBUG_RRCSTS_FLUSH 3 +#define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */ +/* MAC Receive Frame Controller FIFO Status */ +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +/* MAC GMII or MII Receive Protocol Engine Status */ +#define GMAC_DEBUG_RPESTS BIT(0) + +/*--- DMA BLOCK defines ---*/ +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +/* Programmable burst length (passed thorugh platform)*/ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ + +enum rx_tx_priority_ratio { + double_ratio = 0x00004000, /* 2:1 */ + triple_ratio = 0x00008000, /* 3:1 */ + quadruple_ratio = 0x0000c000, /* 4:1 */ +}; + +#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */ +#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */ +#define DMA_BUS_MODE_RPBL_SHIFT 17 +#define DMA_BUS_MODE_USP 0x00800000 +#define DMA_BUS_MODE_MAXPBL 0x01000000 +#define DMA_BUS_MODE_AAL 0x02000000 + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */ +#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */ +/* DMA Bus Mode register defines */ +#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ +#define DMA_BUS_PR_RATIO_SHIFT 14 +#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ + +/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ +/* Disable Drop TCP/IP csum error */ +#define DMA_CONTROL_DT 0x04000000 +#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ +#define DMA_CONTROL_DFF 0x01000000 /* Disaable flushing */ +/* Threshold for Activating the FC */ +enum rfa { + act_full_minus_1 = 0x00800000, + act_full_minus_2 = 0x00800200, + act_full_minus_3 = 0x00800400, + 
act_full_minus_4 = 0x00800600, +}; +/* Threshold for Deactivating the FC */ +enum rfd { + deac_full_minus_1 = 0x00400000, + deac_full_minus_2 = 0x00400800, + deac_full_minus_3 = 0x00401000, + deac_full_minus_4 = 0x00401800, +}; +#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ + +enum ttc_control { + DMA_CONTROL_TTC_64 = 0x00000000, + DMA_CONTROL_TTC_128 = 0x00004000, + DMA_CONTROL_TTC_192 = 0x00008000, + DMA_CONTROL_TTC_256 = 0x0000c000, + DMA_CONTROL_TTC_40 = 0x00010000, + DMA_CONTROL_TTC_32 = 0x00014000, + DMA_CONTROL_TTC_24 = 0x00018000, + DMA_CONTROL_TTC_16 = 0x0001c000, +}; +#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff + +#define DMA_CONTROL_EFC 0x00000100 +#define DMA_CONTROL_FEF 0x00000080 +#define DMA_CONTROL_FUF 0x00000040 + +/* Receive flow control activation field + * RFA field in DMA control register, bits 23,10:9 + */ +#define DMA_CONTROL_RFA_MASK 0x00800600 + +/* Receive flow control deactivation field + * RFD field in DMA control register, bits 22,12:11 + */ +#define DMA_CONTROL_RFD_MASK 0x00401800 + +/* RFD and RFA fields are encoded as follows + * + * Bit Field + * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled) + * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,11 - Reserved + * + * RFD should always be > RFA for a given FIFO size. RFD == RFA may work, + * but packet throughput performance may not be as expected. + * + * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame + * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause + * Description). + * + * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6), + * is set to 0. This allows pause frames with a quanta of 0 to be sent + * as an XOFF message to the link peer. 
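+ * + * For example, programming RFA to Full minus 1KB together with RFD set to + * Full minus 2KB asserts flow control once only about 1 KB of Rx FIFO space + * remains and releases it again when roughly 2 KB are free, providing the + * hysteresis implied by the RFD > RFA rule above.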
+ */ + +#define RFA_FULL_MINUS_1K 0x00000000 +#define RFA_FULL_MINUS_2K 0x00000200 +#define RFA_FULL_MINUS_3K 0x00000400 +#define RFA_FULL_MINUS_4K 0x00000600 +#define RFA_FULL_MINUS_5K 0x00800000 +#define RFA_FULL_MINUS_6K 0x00800200 +#define RFA_FULL_MINUS_7K 0x00800400 + +#define RFD_FULL_MINUS_1K 0x00000000 +#define RFD_FULL_MINUS_2K 0x00000800 +#define RFD_FULL_MINUS_3K 0x00001000 +#define RFD_FULL_MINUS_4K 0x00001800 +#define RFD_FULL_MINUS_5K 0x00400000 +#define RFD_FULL_MINUS_6K 0x00400800 +#define RFD_FULL_MINUS_7K 0x00401000 + +enum rtc_control { + DMA_CONTROL_RTC_64 = 0x00000000, + DMA_CONTROL_RTC_32 = 0x00000008, + DMA_CONTROL_RTC_96 = 0x00000010, + DMA_CONTROL_RTC_128 = 0x00000018, +}; +#define DMA_CONTROL_TC_RX_MASK 0xffffffe7 + +#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */ + +/* MMC registers offset */ +#define GMAC_MMC_CTRL 0x100 +#define GMAC_MMC_RX_INTR 0x104 +#define GMAC_MMC_TX_INTR 0x108 +#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 +#define GMAC_EXTHASH_BASE 0x500 + +extern const struct stmmac_dma_ops dwmac1000_dma_ops; +#endif /* __DWMAC1000_H__ */ diff --git a/devices/stmmac/dwmac1000-6.1-orig.h b/devices/stmmac/dwmac1000-6.1-orig.h new file mode 100644 index 00000000..4296ddda --- /dev/null +++ b/devices/stmmac/dwmac1000-6.1-orig.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ +#ifndef __DWMAC1000_H__ +#define __DWMAC1000_H__ + +#include +#include "common.h" + +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ +#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ +#define GMAC_DEBUG 0x00000024 /* GMAC debug register */ +#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ + +#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ +#define GMAC_INT_STATUS_PMT BIT(3) +#define GMAC_INT_STATUS_MMCIS BIT(4) +#define GMAC_INT_STATUS_MMCRIS BIT(5) +#define GMAC_INT_STATUS_MMCTIS BIT(6) +#define GMAC_INT_STATUS_MMCCSUM BIT(7) +#define GMAC_INT_STATUS_TSTAMP BIT(9) +#define GMAC_INT_STATUS_LPIIS BIT(10) + +/* interrupt mask register */ +#define GMAC_INT_MASK 0x0000003c +#define GMAC_INT_DISABLE_RGMII BIT(0) +#define GMAC_INT_DISABLE_PCSLINK BIT(1) +#define GMAC_INT_DISABLE_PCSAN BIT(2) +#define GMAC_INT_DISABLE_PMT BIT(3) +#define GMAC_INT_DISABLE_TIMESTAMP BIT(9) +#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \ + GMAC_INT_DISABLE_PCSLINK | \ + GMAC_INT_DISABLE_PCSAN) +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \ + GMAC_INT_DISABLE_PCS) + +/* PMT Control and Status */ +#define GMAC_PMT 0x0000002c +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* Energy Efficient Ethernet (EEE) + * + * LPI status, timer and control register offset + */ +#define LPI_CTRL_STATUS 0x0030 +#define LPI_TIMER_CTRL 0x0034 + +/* LPI control and status defines 
*/ +#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */ +#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */ +#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */ +#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */ +#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */ +#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */ +#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */ +#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */ +#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */ +#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ + +/* GMAC HW ADDR regs */ +#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \ + 0x00000040 + (reg * 8)) +#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \ + 0x00000044 + (reg * 8)) +#define GMAC_MAX_PERFECT_ADDRESSES 1 + +#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */ +#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */ + +/* SGMII/RGMII status register */ +#define GMAC_RGSMIIIS_LNKMODE BIT(0) +#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1) +#define GMAC_RGSMIIIS_SPEED_SHIFT 1 +#define GMAC_RGSMIIIS_LNKSTS BIT(3) +#define GMAC_RGSMIIIS_JABTO BIT(4) +#define GMAC_RGSMIIIS_FALSECARDET BIT(5) +#define GMAC_RGSMIIIS_SMIDRXS BIT(16) +/* LNKMOD */ +#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_RGSMIIIS_SPEED_125 0x2 +#define GMAC_RGSMIIIS_SPEED_25 0x1 +#define GMAC_RGSMIIIS_SPEED_2_5 0x0 + +/* GMAC Configuration defines */ +#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ +#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */ +#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ +#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */ +#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */ +#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ +enum inter_frame_gap { + GMAC_CONTROL_IFG_88 = 0x00040000, + GMAC_CONTROL_IFG_80 = 0x00020000, + GMAC_CONTROL_IFG_40 = 0x000e0000, +}; +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \ + GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) + +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define 
GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ + +/* DEBUG Register defines */ +/* MTL TxStatus FIFO */ +#define GMAC_DEBUG_TXSTSFSTS BIT(25) /* MTL TxStatus FIFO Full Status */ +#define GMAC_DEBUG_TXFSTS BIT(24) /* MTL Tx FIFO Not Empty Status */ +#define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */ +/* MTL Tx FIFO Read Controller Status */ +#define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20) +#define GMAC_DEBUG_TRCSTS_SHIFT 20 +#define GMAC_DEBUG_TRCSTS_IDLE 0 +#define GMAC_DEBUG_TRCSTS_READ 1 +#define GMAC_DEBUG_TRCSTS_TXW 2 +#define GMAC_DEBUG_TRCSTS_WRITE 3 +#define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */ +/* MAC Transmit Frame Controller Status */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +/* MAC GMII or MII Transmit Protocol Engine Status */ +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */ +#define GMAC_DEBUG_RXFSTS_SHIFT 8 +#define GMAC_DEBUG_RXFSTS_EMPTY 0 +#define GMAC_DEBUG_RXFSTS_BT 1 +#define GMAC_DEBUG_RXFSTS_AT 2 +#define GMAC_DEBUG_RXFSTS_FULL 3 +#define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */ +#define GMAC_DEBUG_RRCSTS_SHIFT 5 +#define GMAC_DEBUG_RRCSTS_IDLE 0 +#define GMAC_DEBUG_RRCSTS_RDATA 1 +#define GMAC_DEBUG_RRCSTS_RSTAT 2 +#define GMAC_DEBUG_RRCSTS_FLUSH 3 +#define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */ +/* MAC Receive Frame Controller FIFO Status */ +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +/* MAC GMII or MII Receive Protocol Engine Status */ +#define GMAC_DEBUG_RPESTS BIT(0) + +/*--- DMA BLOCK defines ---*/ +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +/* Programmable burst length (passed thorugh platform)*/ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ + +enum rx_tx_priority_ratio { + double_ratio = 0x00004000, /* 2:1 */ + triple_ratio = 0x00008000, /* 3:1 */ + quadruple_ratio = 0x0000c000, /* 4:1 */ +}; + +#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */ +#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */ +#define DMA_BUS_MODE_RPBL_SHIFT 17 +#define DMA_BUS_MODE_USP 0x00800000 +#define DMA_BUS_MODE_MAXPBL 0x01000000 +#define DMA_BUS_MODE_AAL 0x02000000 + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_HOST_TX_DESC 
0x00001048 /* Current Host Tx descriptor */ +#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */ +/* DMA Bus Mode register defines */ +#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ +#define DMA_BUS_PR_RATIO_SHIFT 14 +#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ + +/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ +/* Disable Drop TCP/IP csum error */ +#define DMA_CONTROL_DT 0x04000000 +#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ +#define DMA_CONTROL_DFF 0x01000000 /* Disaable flushing */ +/* Threshold for Activating the FC */ +enum rfa { + act_full_minus_1 = 0x00800000, + act_full_minus_2 = 0x00800200, + act_full_minus_3 = 0x00800400, + act_full_minus_4 = 0x00800600, +}; +/* Threshold for Deactivating the FC */ +enum rfd { + deac_full_minus_1 = 0x00400000, + deac_full_minus_2 = 0x00400800, + deac_full_minus_3 = 0x00401000, + deac_full_minus_4 = 0x00401800, +}; +#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ + +enum ttc_control { + DMA_CONTROL_TTC_64 = 0x00000000, + DMA_CONTROL_TTC_128 = 0x00004000, + DMA_CONTROL_TTC_192 = 0x00008000, + DMA_CONTROL_TTC_256 = 0x0000c000, + DMA_CONTROL_TTC_40 = 0x00010000, + DMA_CONTROL_TTC_32 = 0x00014000, + DMA_CONTROL_TTC_24 = 0x00018000, + DMA_CONTROL_TTC_16 = 0x0001c000, +}; +#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff + +#define DMA_CONTROL_EFC 0x00000100 +#define DMA_CONTROL_FEF 0x00000080 +#define DMA_CONTROL_FUF 0x00000040 + +/* Receive flow control activation field + * RFA field in DMA control register, bits 23,10:9 + */ +#define DMA_CONTROL_RFA_MASK 0x00800600 + +/* Receive flow control deactivation field + * RFD field in DMA control register, bits 22,12:11 + */ +#define DMA_CONTROL_RFD_MASK 0x00401800 + +/* RFD and RFA fields are encoded as follows + * + * Bit Field + * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled) + * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,11 - Reserved + * + * RFD should always be > RFA for a given FIFO size. RFD == RFA may work, + * but packet throughput performance may not be as expected. + * + * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame + * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause + * Description). + * + * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6), + * is set to 0. This allows pause frames with a quanta of 0 to be sent + * as an XOFF message to the link peer. 
+ */ + +#define RFA_FULL_MINUS_1K 0x00000000 +#define RFA_FULL_MINUS_2K 0x00000200 +#define RFA_FULL_MINUS_3K 0x00000400 +#define RFA_FULL_MINUS_4K 0x00000600 +#define RFA_FULL_MINUS_5K 0x00800000 +#define RFA_FULL_MINUS_6K 0x00800200 +#define RFA_FULL_MINUS_7K 0x00800400 + +#define RFD_FULL_MINUS_1K 0x00000000 +#define RFD_FULL_MINUS_2K 0x00000800 +#define RFD_FULL_MINUS_3K 0x00001000 +#define RFD_FULL_MINUS_4K 0x00001800 +#define RFD_FULL_MINUS_5K 0x00400000 +#define RFD_FULL_MINUS_6K 0x00400800 +#define RFD_FULL_MINUS_7K 0x00401000 + +enum rtc_control { + DMA_CONTROL_RTC_64 = 0x00000000, + DMA_CONTROL_RTC_32 = 0x00000008, + DMA_CONTROL_RTC_96 = 0x00000010, + DMA_CONTROL_RTC_128 = 0x00000018, +}; +#define DMA_CONTROL_TC_RX_MASK 0xffffffe7 + +#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */ + +/* MMC registers offset */ +#define GMAC_MMC_CTRL 0x100 +#define GMAC_MMC_RX_INTR 0x104 +#define GMAC_MMC_TX_INTR 0x108 +#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 +#define GMAC_EXTHASH_BASE 0x500 + +extern const struct stmmac_dma_ops dwmac1000_dma_ops; +#endif /* __DWMAC1000_H__ */ diff --git a/devices/stmmac/dwmac1000_core-6.1-ethercat.c b/devices/stmmac/dwmac1000_core-6.1-ethercat.c new file mode 100644 index 00000000..a842b728 --- /dev/null +++ b/devices/stmmac/dwmac1000_core-6.1-ethercat.c @@ -0,0 +1,556 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + This only implements the mac core functions for this chip. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include "stmmac-6.1-ethercat.h" +#include "stmmac_pcs-6.1-ethercat.h" +#include "dwmac1000-6.1-ethercat.h" + +static void dwmac1000_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONTROL); + int mtu = dev->mtu; + + /* Configure GMAC core */ + value |= GMAC_CORE_INIT; + + if (mtu > 1500) + value |= GMAC_CONTROL_2K; + if (mtu > 2000) + value |= GMAC_CONTROL_JE; + + if (hw->ps) { + value |= GMAC_CONTROL_TE; + + value &= ~hw->link.speed_mask; + switch (hw->ps) { + case SPEED_1000: + value |= hw->link.speed1000; + break; + case SPEED_100: + value |= hw->link.speed100; + break; + case SPEED_10: + value |= hw->link.speed10; + break; + } + } + + writel(value, ioaddr + GMAC_CONTROL); + + /* Mask GMAC interrupts */ + value = GMAC_INT_DEFAULT_MASK; + + if (hw->pcs) + value &= ~GMAC_INT_DISABLE_PCS; + + writel(value, ioaddr + GMAC_INT_MASK); + +#ifdef STMMAC_VLAN_TAG_USED + /* Tag detection without filtering */ + writel(0x0, ioaddr + GMAC_VLAN_TAG); +#endif +} + +static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONTROL); + + if (hw->rx_csum) + value |= GMAC_CONTROL_IPC; + else + value &= ~GMAC_CONTROL_IPC; + + writel(value, ioaddr + GMAC_CONTROL); + + value = readl(ioaddr + GMAC_CONTROL); + + return !!(value & GMAC_CONTROL_IPC); +} + +static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < 55; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static void 
dwmac1000_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac1000_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, + int mcbitslog2) +{ + int numhashregs, regs; + + switch (mcbitslog2) { + case 6: + writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW); + writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH); + return; + case 7: + numhashregs = 4; + break; + case 8: + numhashregs = 8; + break; + default: + pr_debug("STMMAC: err in setting multicast filter\n"); + return; + } + for (regs = 0; regs < numhashregs; regs++) + writel(mcfilterbits[regs], + ioaddr + GMAC_EXTHASH_BASE + regs * 4); +} + +static void dwmac1000_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + unsigned int value = 0; + unsigned int perfect_addr_number = hw->unicast_filter_entries; + u32 mc_filter[8]; + int mcbitslog2 = hw->mcast_bits_log2; + + pr_debug("%s: # mcasts %d, # unicast %d\n", __func__, + netdev_mc_count(dev), netdev_uc_count(dev)); + + memset(mc_filter, 0, sizeof(mc_filter)); + + if (dev->flags & IFF_PROMISC) { + value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF; + } else if (dev->flags & IFF_ALLMULTI) { + value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { + /* Fall back to all multicast if we've no filter */ + value = GMAC_FRAME_FILTER_PM; + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ + value = GMAC_FRAME_FILTER_HMC; + + netdev_for_each_mc_addr(ha, dev) { + /* The upper n bits of the calculated CRC are used to + * index the contents of the hash table. The number of + * bits used depends on the hardware configuration + * selected at core configuration time. + */ + int bit_nr = bitrev32(~crc32_le(~0, ha->addr, + ETH_ALEN)) >> + (32 - mcbitslog2); + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. + */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + } + + value |= GMAC_FRAME_FILTER_HPF; + dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2); + + /* Handle multiple unicast addresses (perfect filtering) */ + if (netdev_uc_count(dev) > perfect_addr_number) + /* Switch to promiscuous mode if more than unicast + * addresses are requested than supported by hardware. 
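+ * The hardware provides only perfect_addr_number perfect-match slots, so + * any unicast address beyond that count could not be filtered in hardware.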
+ */ + value |= GMAC_FRAME_FILTER_PR; + else { + int reg = 1; + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, dev) { + stmmac_set_mac_addr(ioaddr, ha->addr, + GMAC_ADDR_HIGH(reg), + GMAC_ADDR_LOW(reg)); + reg++; + } + + while (reg < perfect_addr_number) { + writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); + writel(0, ioaddr + GMAC_ADDR_LOW(reg)); + reg++; + } + } + +#ifdef FRAME_FILTER_DEBUG + /* Enable Receive all mode (to debug filtering_fail errors) */ + value |= GMAC_FRAME_FILTER_RA; +#endif + writel(value, ioaddr + GMAC_FRAME_FILTER); +} + + +static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + /* Set flow such that DZPQ in Mac Register 6 is 0, + * and unicast pause detect is enabled. + */ + unsigned int flow = GMAC_FLOW_CTRL_UP; + + pr_debug("GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + pr_debug("\tReceive Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_RFE; + } + if (fc & FLOW_TX) { + pr_debug("\tTransmit Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_TFE; + } + + if (duplex) { + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); + flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); + } + + writel(flow, ioaddr + GMAC_FLOW_CTRL); +} + +static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int pmt = 0; + + if (mode & WAKE_MAGIC) { + pr_debug("GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } + if (mode & WAKE_UCAST) { + pr_debug("GMAC: WOL on global unicast\n"); + pmt |= power_down | global_unicast | wake_up_frame_en; + } + + writel(pmt, ioaddr + GMAC_PMT); +} + +/* RGMII or SMII interface */ +static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 status; + + status = readl(ioaddr + GMAC_RGSMIIIS); + x->irq_rgmii_n++; + + /* Check the link status */ + if (status & GMAC_RGSMIIIS_LNKSTS) { + int speed_value; + + x->pcs_link = 1; + + speed_value = ((status & GMAC_RGSMIIIS_SPEED) >> + GMAC_RGSMIIIS_SPEED_SHIFT); + if (speed_value == GMAC_RGSMIIIS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_RGSMIIIS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK); + + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? "Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } +} + +static int dwmac1000_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + u32 intr_mask = readl(ioaddr + GMAC_INT_MASK); + int ret = 0; + + /* Discard masked bits */ + intr_status &= ~intr_mask; + + /* Not used events (e.g. MMC interrupts) are not handled. 
*/ + if ((intr_status & GMAC_INT_STATUS_MMCTIS)) + x->mmc_tx_irq_n++; + if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS)) + x->mmc_rx_irq_n++; + if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM)) + x->mmc_rx_csum_offload_irq_n++; + if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) { + /* clear the PMT bits 5 and 6 by reading the PMT status reg */ + readl(ioaddr + GMAC_PMT); + x->irq_receive_pmt_irq_n++; + } + + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & GMAC_INT_STATUS_LPIIS) { + /* Clean LPI interrupt by reading the Reg 12 */ + ret = readl(ioaddr + LPI_CTRL_STATUS); + + if (ret & LPI_CTRL_STATUS_TLPIEN) + x->irq_tx_path_in_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_TLPIEX) + x->irq_tx_path_exit_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac1000_rgsmii(ioaddr, x); + + return ret; +} + +static void dwmac1000_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /*TODO - en_tx_lpi_clockgating treatment */ + + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. + */ + value = readl(ioaddr + LPI_CTRL_STATUS); + value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + LPI_CTRL_STATUS); + value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + LPI_CTRL_STATUS); + + if (link) + value |= LPI_CTRL_STATUS_PLS; + else + value &= ~LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. 
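+ * For example, ls = 1000 (ms) and tw = 30 (us) pack to + * (30 & 0xffff) | ((1000 & 0x7ff) << 16) = 0x03e8001e.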
+ */ + writel(value, ioaddr + LPI_TIMER_CTRL); +} + +static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) +{ + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); +} + +static void dwmac1000_rane(void __iomem *ioaddr, bool restart) +{ + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} + +static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); +} + +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues) +{ + u32 value = readl(ioaddr + GMAC_DEBUG); + + if (value & GMAC_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & GMAC_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & GMAC_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & GMAC_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & GMAC_DEBUG_TRCSTS_MASK) + >> GMAC_DEBUG_TRCSTS_SHIFT; + if (trcsts == GMAC_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == GMAC_DEBUG_TRCSTS_TXW) + x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == GMAC_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; + if (value & GMAC_DEBUG_TFCSTS_MASK) { + u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) + >> GMAC_DEBUG_TFCSTS_SHIFT; + + if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) + x->mac_tx_frame_ctrl_xfer++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) + x->mac_tx_frame_ctrl_pause++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) + x->mac_tx_frame_ctrl_wait++; + else + x->mac_tx_frame_ctrl_idle++; + } + if (value & GMAC_DEBUG_TPESTS) + x->mac_gmii_tx_proto_engine++; + if (value & GMAC_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & GMAC_DEBUG_RXFSTS_MASK) + >> GMAC_DEBUG_RRCSTS_SHIFT; + + if (rxfsts == GMAC_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + x->mtl_rx_fifo_fill_level_empty++; + } + if (value & GMAC_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & GMAC_DEBUG_RRCSTS_MASK) >> + GMAC_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == GMAC_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; + if (value & GMAC_DEBUG_RFCFCSTS_MASK) + x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) + >> GMAC_DEBUG_RFCFCSTS_SHIFT; + if (value & GMAC_DEBUG_RPESTS) + x->mac_gmii_rx_proto_engine++; +} + +static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + GMAC_CONTROL); + + if (enable) + value |= GMAC_CONTROL_LM; + else + value &= ~GMAC_CONTROL_LM; + + writel(value, ioaddr + GMAC_CONTROL); +} + +const struct stmmac_ops dwmac1000_ops = { + .core_init = dwmac1000_core_init, + .set_mac = stmmac_set_mac, + .rx_ipc = dwmac1000_rx_ipc_enable, + .dump_regs = dwmac1000_dump_regs, + .host_irq_status = dwmac1000_irq_status, + .set_filter = dwmac1000_set_filter, + .flow_ctrl = dwmac1000_flow_ctrl, + .pmt = dwmac1000_pmt, + .set_umac_addr = dwmac1000_set_umac_addr, + .get_umac_addr = dwmac1000_get_umac_addr, + .set_eee_mode = dwmac1000_set_eee_mode, + .reset_eee_mode = 
dwmac1000_reset_eee_mode, + .set_eee_timer = dwmac1000_set_eee_timer, + .set_eee_pls = dwmac1000_set_eee_pls, + .debug = dwmac1000_debug, + .pcs_ctrl_ane = dwmac1000_ctrl_ane, + .pcs_rane = dwmac1000_rane, + .pcs_get_adv_lp = dwmac1000_get_adv_lp, + .set_mac_loopback = dwmac1000_set_mac_loopback, +}; + +int dwmac1000_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tDWMAC1000\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = GMAC_CONTROL_DM; + mac->link.speed10 = GMAC_CONTROL_PS; + mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->link.speed1000 = 0; + mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + + return 0; +} diff --git a/devices/stmmac/dwmac1000_core-6.1-orig.c b/devices/stmmac/dwmac1000_core-6.1-orig.c new file mode 100644 index 00000000..0e00dd83 --- /dev/null +++ b/devices/stmmac/dwmac1000_core-6.1-orig.c @@ -0,0 +1,556 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + This only implements the mac core functions for this chip. 
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include "stmmac.h" +#include "stmmac_pcs.h" +#include "dwmac1000.h" + +static void dwmac1000_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONTROL); + int mtu = dev->mtu; + + /* Configure GMAC core */ + value |= GMAC_CORE_INIT; + + if (mtu > 1500) + value |= GMAC_CONTROL_2K; + if (mtu > 2000) + value |= GMAC_CONTROL_JE; + + if (hw->ps) { + value |= GMAC_CONTROL_TE; + + value &= ~hw->link.speed_mask; + switch (hw->ps) { + case SPEED_1000: + value |= hw->link.speed1000; + break; + case SPEED_100: + value |= hw->link.speed100; + break; + case SPEED_10: + value |= hw->link.speed10; + break; + } + } + + writel(value, ioaddr + GMAC_CONTROL); + + /* Mask GMAC interrupts */ + value = GMAC_INT_DEFAULT_MASK; + + if (hw->pcs) + value &= ~GMAC_INT_DISABLE_PCS; + + writel(value, ioaddr + GMAC_INT_MASK); + +#ifdef STMMAC_VLAN_TAG_USED + /* Tag detection without filtering */ + writel(0x0, ioaddr + GMAC_VLAN_TAG); +#endif +} + +static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONTROL); + + if (hw->rx_csum) + value |= GMAC_CONTROL_IPC; + else + value &= ~GMAC_CONTROL_IPC; + + writel(value, ioaddr + GMAC_CONTROL); + + value = readl(ioaddr + GMAC_CONTROL); + + return !!(value & GMAC_CONTROL_IPC); +} + +static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < 55; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static void dwmac1000_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac1000_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, + int mcbitslog2) +{ + int numhashregs, regs; + + switch (mcbitslog2) { + case 6: + writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW); + writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH); + return; + case 7: + numhashregs = 4; + break; + case 8: + numhashregs = 8; + break; + default: + pr_debug("STMMAC: err in setting multicast filter\n"); + return; + } + for (regs = 0; regs < numhashregs; regs++) + writel(mcfilterbits[regs], + ioaddr + GMAC_EXTHASH_BASE + regs * 4); +} + +static void dwmac1000_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + unsigned int value = 0; + unsigned int perfect_addr_number = hw->unicast_filter_entries; + u32 mc_filter[8]; + int mcbitslog2 = hw->mcast_bits_log2; + + pr_debug("%s: # mcasts %d, # unicast %d\n", __func__, + netdev_mc_count(dev), netdev_uc_count(dev)); + + memset(mc_filter, 0, sizeof(mc_filter)); + + if (dev->flags & IFF_PROMISC) { + value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF; + } else if (dev->flags & IFF_ALLMULTI) { + value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { + /* Fall back to all multicast if we've no 
filter */ + value = GMAC_FRAME_FILTER_PM; + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ + value = GMAC_FRAME_FILTER_HMC; + + netdev_for_each_mc_addr(ha, dev) { + /* The upper n bits of the calculated CRC are used to + * index the contents of the hash table. The number of + * bits used depends on the hardware configuration + * selected at core configuration time. + */ + int bit_nr = bitrev32(~crc32_le(~0, ha->addr, + ETH_ALEN)) >> + (32 - mcbitslog2); + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. + */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + } + + value |= GMAC_FRAME_FILTER_HPF; + dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2); + + /* Handle multiple unicast addresses (perfect filtering) */ + if (netdev_uc_count(dev) > perfect_addr_number) + /* Switch to promiscuous mode if more than unicast + * addresses are requested than supported by hardware. + */ + value |= GMAC_FRAME_FILTER_PR; + else { + int reg = 1; + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, dev) { + stmmac_set_mac_addr(ioaddr, ha->addr, + GMAC_ADDR_HIGH(reg), + GMAC_ADDR_LOW(reg)); + reg++; + } + + while (reg < perfect_addr_number) { + writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); + writel(0, ioaddr + GMAC_ADDR_LOW(reg)); + reg++; + } + } + +#ifdef FRAME_FILTER_DEBUG + /* Enable Receive all mode (to debug filtering_fail errors) */ + value |= GMAC_FRAME_FILTER_RA; +#endif + writel(value, ioaddr + GMAC_FRAME_FILTER); +} + + +static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + /* Set flow such that DZPQ in Mac Register 6 is 0, + * and unicast pause detect is enabled. + */ + unsigned int flow = GMAC_FLOW_CTRL_UP; + + pr_debug("GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + pr_debug("\tReceive Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_RFE; + } + if (fc & FLOW_TX) { + pr_debug("\tTransmit Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_TFE; + } + + if (duplex) { + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); + flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); + } + + writel(flow, ioaddr + GMAC_FLOW_CTRL); +} + +static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int pmt = 0; + + if (mode & WAKE_MAGIC) { + pr_debug("GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } + if (mode & WAKE_UCAST) { + pr_debug("GMAC: WOL on global unicast\n"); + pmt |= power_down | global_unicast | wake_up_frame_en; + } + + writel(pmt, ioaddr + GMAC_PMT); +} + +/* RGMII or SMII interface */ +static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 status; + + status = readl(ioaddr + GMAC_RGSMIIIS); + x->irq_rgmii_n++; + + /* Check the link status */ + if (status & GMAC_RGSMIIIS_LNKSTS) { + int speed_value; + + x->pcs_link = 1; + + speed_value = ((status & GMAC_RGSMIIIS_SPEED) >> + GMAC_RGSMIIIS_SPEED_SHIFT); + if (speed_value == GMAC_RGSMIIIS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_RGSMIIIS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK); + + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? 
"Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } +} + +static int dwmac1000_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + u32 intr_mask = readl(ioaddr + GMAC_INT_MASK); + int ret = 0; + + /* Discard masked bits */ + intr_status &= ~intr_mask; + + /* Not used events (e.g. MMC interrupts) are not handled. */ + if ((intr_status & GMAC_INT_STATUS_MMCTIS)) + x->mmc_tx_irq_n++; + if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS)) + x->mmc_rx_irq_n++; + if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM)) + x->mmc_rx_csum_offload_irq_n++; + if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) { + /* clear the PMT bits 5 and 6 by reading the PMT status reg */ + readl(ioaddr + GMAC_PMT); + x->irq_receive_pmt_irq_n++; + } + + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & GMAC_INT_STATUS_LPIIS) { + /* Clean LPI interrupt by reading the Reg 12 */ + ret = readl(ioaddr + LPI_CTRL_STATUS); + + if (ret & LPI_CTRL_STATUS_TLPIEN) + x->irq_tx_path_in_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_TLPIEX) + x->irq_tx_path_exit_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac1000_rgsmii(ioaddr, x); + + return ret; +} + +static void dwmac1000_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /*TODO - en_tx_lpi_clockgating treatment */ + + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. + */ + value = readl(ioaddr + LPI_CTRL_STATUS); + value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + LPI_CTRL_STATUS); + value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + LPI_CTRL_STATUS); + + if (link) + value |= LPI_CTRL_STATUS_PLS; + else + value &= ~LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. 
+ */ + writel(value, ioaddr + LPI_TIMER_CTRL); +} + +static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) +{ + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); +} + +static void dwmac1000_rane(void __iomem *ioaddr, bool restart) +{ + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} + +static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); +} + +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues) +{ + u32 value = readl(ioaddr + GMAC_DEBUG); + + if (value & GMAC_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & GMAC_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & GMAC_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & GMAC_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & GMAC_DEBUG_TRCSTS_MASK) + >> GMAC_DEBUG_TRCSTS_SHIFT; + if (trcsts == GMAC_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == GMAC_DEBUG_TRCSTS_TXW) + x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == GMAC_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; + if (value & GMAC_DEBUG_TFCSTS_MASK) { + u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) + >> GMAC_DEBUG_TFCSTS_SHIFT; + + if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) + x->mac_tx_frame_ctrl_xfer++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) + x->mac_tx_frame_ctrl_pause++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) + x->mac_tx_frame_ctrl_wait++; + else + x->mac_tx_frame_ctrl_idle++; + } + if (value & GMAC_DEBUG_TPESTS) + x->mac_gmii_tx_proto_engine++; + if (value & GMAC_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & GMAC_DEBUG_RXFSTS_MASK) + >> GMAC_DEBUG_RRCSTS_SHIFT; + + if (rxfsts == GMAC_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == GMAC_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + x->mtl_rx_fifo_fill_level_empty++; + } + if (value & GMAC_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & GMAC_DEBUG_RRCSTS_MASK) >> + GMAC_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == GMAC_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == GMAC_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & GMAC_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; + if (value & GMAC_DEBUG_RFCFCSTS_MASK) + x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) + >> GMAC_DEBUG_RFCFCSTS_SHIFT; + if (value & GMAC_DEBUG_RPESTS) + x->mac_gmii_rx_proto_engine++; +} + +static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + GMAC_CONTROL); + + if (enable) + value |= GMAC_CONTROL_LM; + else + value &= ~GMAC_CONTROL_LM; + + writel(value, ioaddr + GMAC_CONTROL); +} + +const struct stmmac_ops dwmac1000_ops = { + .core_init = dwmac1000_core_init, + .set_mac = stmmac_set_mac, + .rx_ipc = dwmac1000_rx_ipc_enable, + .dump_regs = dwmac1000_dump_regs, + .host_irq_status = dwmac1000_irq_status, + .set_filter = dwmac1000_set_filter, + .flow_ctrl = dwmac1000_flow_ctrl, + .pmt = dwmac1000_pmt, + .set_umac_addr = dwmac1000_set_umac_addr, + .get_umac_addr = dwmac1000_get_umac_addr, + .set_eee_mode = dwmac1000_set_eee_mode, + .reset_eee_mode = 
dwmac1000_reset_eee_mode, + .set_eee_timer = dwmac1000_set_eee_timer, + .set_eee_pls = dwmac1000_set_eee_pls, + .debug = dwmac1000_debug, + .pcs_ctrl_ane = dwmac1000_ctrl_ane, + .pcs_rane = dwmac1000_rane, + .pcs_get_adv_lp = dwmac1000_get_adv_lp, + .set_mac_loopback = dwmac1000_set_mac_loopback, +}; + +int dwmac1000_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tDWMAC1000\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = GMAC_CONTROL_DM; + mac->link.speed10 = GMAC_CONTROL_PS; + mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->link.speed1000 = 0; + mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + + return 0; +} diff --git a/devices/stmmac/dwmac1000_dma-6.1-ethercat.c b/devices/stmmac/dwmac1000_dma-6.1-ethercat.c new file mode 100644 index 00000000..ba2f74b9 --- /dev/null +++ b/devices/stmmac/dwmac1000_dma-6.1-ethercat.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + This contains the functions to handle the dma. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "dwmac1000-6.1-ethercat.h" +#include "dwmac_dma-6.1-ethercat.h" + +static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + DMA_AXI_BUS_MODE); + int i; + + pr_info("dwmac1000: Master AXI performs %s burst length\n", + !(value & DMA_AXI_UNDEF) ? "fixed" : "any"); + + if (axi->axi_lpi_en) + value |= DMA_AXI_EN_LPI; + if (axi->axi_xit_frm) + value |= DMA_AXI_LPI_XIT_FRM; + + value &= ~DMA_AXI_WR_OSR_LMT; + value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) << + DMA_AXI_WR_OSR_LMT_SHIFT; + + value &= ~DMA_AXI_RD_OSR_LMT; + value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) << + DMA_AXI_RD_OSR_LMT_SHIFT; + + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). 
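The switch that follows maps each entry of the platform's axi_blen[] list onto one enable bit in DMA_AXI_BUS_MODE. A standalone sketch of that mapping; the BLEN bit positions used here are illustrative assumptions, not values taken from the driver's dwmac_dma header:

```c
#include <stdint.h>
#include <stdio.h>

#define AXI_BLEN 7

/* Illustrative bit assignments only; the real DMA_AXI_BLEN* values come from
 * the driver's dwmac_dma header. */
#define DMA_AXI_BLEN4    (1u << 1)
#define DMA_AXI_BLEN8    (1u << 2)
#define DMA_AXI_BLEN16   (1u << 3)
#define DMA_AXI_BLEN32   (1u << 4)
#define DMA_AXI_BLEN64   (1u << 5)
#define DMA_AXI_BLEN128  (1u << 6)
#define DMA_AXI_BLEN256  (1u << 7)

/* Translate a platform burst-length list into one-hot BLEN enable bits,
 * mirroring the switch in dwmac1000_dma_axi(). */
static uint32_t axi_blen_bits(const int blen[AXI_BLEN])
{
    uint32_t value = 0;
    int i;

    for (i = 0; i < AXI_BLEN; i++) {
        switch (blen[i]) {
        case 256: value |= DMA_AXI_BLEN256; break;
        case 128: value |= DMA_AXI_BLEN128; break;
        case 64:  value |= DMA_AXI_BLEN64;  break;
        case 32:  value |= DMA_AXI_BLEN32;  break;
        case 16:  value |= DMA_AXI_BLEN16;  break;
        case 8:   value |= DMA_AXI_BLEN8;   break;
        case 4:   value |= DMA_AXI_BLEN4;   break;
        }
    }
    return value;
}

int main(void)
{
    int blen[AXI_BLEN] = { 16, 8, 4, 0, 0, 0, 0 };  /* example platform data */

    printf("BLEN bits: 0x%02x\n", axi_blen_bits(blen));
    return 0;
}
```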
+ */ + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= DMA_AXI_BLEN256; + break; + case 128: + value |= DMA_AXI_BLEN128; + break; + case 64: + value |= DMA_AXI_BLEN64; + break; + case 32: + value |= DMA_AXI_BLEN32; + break; + case 16: + value |= DMA_AXI_BLEN16; + break; + case 8: + value |= DMA_AXI_BLEN8; + break; + case 4: + value |= DMA_AXI_BLEN4; + break; + } + } + + writel(value, ioaddr + DMA_AXI_BUS_MODE); +} + +static void dwmac1000_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + + /* + * Set the DMA PBL (Programmable Burst Length) mode. + * + * Note: before stmmac core 3.50 this mode bit was 4xPBL, and + * post 3.5 mode bit acts as 8*PBL. + */ + if (dma_cfg->pblx8) + value |= DMA_BUS_MODE_MAXPBL; + value |= DMA_BUS_MODE_USP; + value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK); + value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT); + value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ + if (dma_cfg->mixed_burst) + value |= DMA_BUS_MODE_MB; + + if (atds) + value |= DMA_BUS_MODE_ATDS; + + if (dma_cfg->aal) + value |= DMA_BUS_MODE_AAL; + + writel(value, ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} + +static void dwmac1000_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_rx_phy, u32 chan) +{ + /* RX descriptor base address list must be written into DMA CSR3 */ + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR); +} + +static void dwmac1000_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_tx_phy, u32 chan) +{ + /* TX descriptor base address list must be written into DMA CSR4 */ + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR); +} + +static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) +{ + csr6 &= ~DMA_CONTROL_RFA_MASK; + csr6 &= ~DMA_CONTROL_RFD_MASK; + + /* Leave flow control disabled if receive fifo size is less than + * 4K or 0. Otherwise, send XOFF when fifo is 1K less than full, + * and send XON when 2K less than full. 
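The branch that follows only arms hardware flow control when the RX FIFO holds at least 4 KiB; above that size, XOFF is asserted 1 KiB below full and XON 2 KiB below full. A minimal sketch of that policy in plain numbers, leaving the RFA/RFD register encodings out of the picture:

```c
#include <stdio.h>

/* Mirror the threshold policy of dwmac1000_configure_fc(): flow control is
 * only enabled for RX FIFOs of 4 KiB or more; XOFF fires 1 KiB below full
 * and XON 2 KiB below full. */
static void show_fc_policy(int rxfifosz)
{
    if (rxfifosz < 4096) {
        printf("rxfifo %d: flow control disabled (FIFO too small)\n",
               rxfifosz);
        return;
    }
    printf("rxfifo %d: XOFF at %d bytes, XON at %d bytes\n",
           rxfifosz, rxfifosz - 1024, rxfifosz - 2048);
}

int main(void)
{
    show_fc_policy(2048);   /* below the 4 KiB cut-off */
    show_fc_policy(8192);   /* a larger GMAC RX FIFO */
    return 0;
}
```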
+ */ + if (rxfifosz < 4096) { + csr6 &= ~DMA_CONTROL_EFC; + pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n", + rxfifosz); + } else { + csr6 |= DMA_CONTROL_EFC; + csr6 |= RFA_FULL_MINUS_1K; + csr6 |= RFD_FULL_MINUS_2K; + } + return csr6; +} + +static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + csr6 |= DMA_CONTROL_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); + csr6 &= ~DMA_CONTROL_RSF; + csr6 &= DMA_CONTROL_TC_RX_MASK; + if (mode <= 32) + csr6 |= DMA_CONTROL_RTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_RTC_64; + else if (mode <= 96) + csr6 |= DMA_CONTROL_RTC_96; + else + csr6 |= DMA_CONTROL_RTC_128; + } + + /* Configure flow control based on rx fifo size */ + csr6 = dwmac1000_configure_fc(csr6, fifosz); + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. */ + csr6 |= DMA_CONTROL_TSF; + /* Operating on second frame increase the performance + * especially when transmit store-and-forward is used. + */ + csr6 |= DMA_CONTROL_OSF; + } else { + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); + csr6 &= ~DMA_CONTROL_TSF; + csr6 &= DMA_CONTROL_TC_TX_MASK; + /* Set the transmit threshold */ + if (mode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else if (mode <= 128) + csr6 |= DMA_CONTROL_TTC_128; + else if (mode <= 192) + csr6 |= DMA_CONTROL_TTC_192; + else + csr6 |= DMA_CONTROL_TTC_256; + } + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++) + if ((i < 12) || (i > 17)) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); +} + +static int dwmac1000_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE); + + if (!hw_cap) { + /* 0x00000000 is the value read on old hardware that does not + * implement this register + */ + return -EOPNOTSUPP; + } + + dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); + dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; + dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; + dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; + dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5; + dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; + dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; + dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; + dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; + /* MMC */ + dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; + /* IEEE 1588-2002 */ + dma_cap->time_stamp = + (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; + /* IEEE 1588-2008 */ + dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; + dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; + /* TX and RX csum */ + dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; + dma_cap->rx_coe_type1 = (hw_cap & 
DMA_HW_FEAT_RXTYP1COE) >> 17; + dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; + dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; + /* TX and RX number of channels */ + dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; + dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; + /* Alternate (enhanced) DESC mode */ + dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; + + return 0; +} + +static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, + u32 queue) +{ + writel(riwt, ioaddr + DMA_RX_WATCHDOG); +} + +const struct stmmac_dma_ops dwmac1000_dma_ops = { + .reset = dwmac_dma_reset, + .init = dwmac1000_dma_init, + .init_rx_chan = dwmac1000_dma_init_rx, + .init_tx_chan = dwmac1000_dma_init_tx, + .axi = dwmac1000_dma_axi, + .dump_regs = dwmac1000_dump_dma_regs, + .dma_rx_mode = dwmac1000_dma_operation_mode_rx, + .dma_tx_mode = dwmac1000_dma_operation_mode_tx, + .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_irq = dwmac_enable_dma_irq, + .disable_dma_irq = dwmac_disable_dma_irq, + .start_tx = dwmac_dma_start_tx, + .stop_tx = dwmac_dma_stop_tx, + .start_rx = dwmac_dma_start_rx, + .stop_rx = dwmac_dma_stop_rx, + .dma_interrupt = dwmac_dma_interrupt, + .get_hw_feature = dwmac1000_get_hw_feature, + .rx_watchdog = dwmac1000_rx_watchdog, +}; diff --git a/devices/stmmac/dwmac1000_dma-6.1-orig.c b/devices/stmmac/dwmac1000_dma-6.1-orig.c new file mode 100644 index 00000000..f5581db0 --- /dev/null +++ b/devices/stmmac/dwmac1000_dma-6.1-orig.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + This contains the functions to handle the dma. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "dwmac1000.h" +#include "dwmac_dma.h" + +static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + DMA_AXI_BUS_MODE); + int i; + + pr_info("dwmac1000: Master AXI performs %s burst length\n", + !(value & DMA_AXI_UNDEF) ? "fixed" : "any"); + + if (axi->axi_lpi_en) + value |= DMA_AXI_EN_LPI; + if (axi->axi_xit_frm) + value |= DMA_AXI_LPI_XIT_FRM; + + value &= ~DMA_AXI_WR_OSR_LMT; + value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) << + DMA_AXI_WR_OSR_LMT_SHIFT; + + value &= ~DMA_AXI_RD_OSR_LMT; + value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) << + DMA_AXI_RD_OSR_LMT_SHIFT; + + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). 
+ */ + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= DMA_AXI_BLEN256; + break; + case 128: + value |= DMA_AXI_BLEN128; + break; + case 64: + value |= DMA_AXI_BLEN64; + break; + case 32: + value |= DMA_AXI_BLEN32; + break; + case 16: + value |= DMA_AXI_BLEN16; + break; + case 8: + value |= DMA_AXI_BLEN8; + break; + case 4: + value |= DMA_AXI_BLEN4; + break; + } + } + + writel(value, ioaddr + DMA_AXI_BUS_MODE); +} + +static void dwmac1000_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + + /* + * Set the DMA PBL (Programmable Burst Length) mode. + * + * Note: before stmmac core 3.50 this mode bit was 4xPBL, and + * post 3.5 mode bit acts as 8*PBL. + */ + if (dma_cfg->pblx8) + value |= DMA_BUS_MODE_MAXPBL; + value |= DMA_BUS_MODE_USP; + value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK); + value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT); + value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ + if (dma_cfg->mixed_burst) + value |= DMA_BUS_MODE_MB; + + if (atds) + value |= DMA_BUS_MODE_ATDS; + + if (dma_cfg->aal) + value |= DMA_BUS_MODE_AAL; + + writel(value, ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} + +static void dwmac1000_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_rx_phy, u32 chan) +{ + /* RX descriptor base address list must be written into DMA CSR3 */ + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR); +} + +static void dwmac1000_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_tx_phy, u32 chan) +{ + /* TX descriptor base address list must be written into DMA CSR4 */ + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR); +} + +static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) +{ + csr6 &= ~DMA_CONTROL_RFA_MASK; + csr6 &= ~DMA_CONTROL_RFD_MASK; + + /* Leave flow control disabled if receive fifo size is less than + * 4K or 0. Otherwise, send XOFF when fifo is 1K less than full, + * and send XON when 2K less than full. 
+ */ + if (rxfifosz < 4096) { + csr6 &= ~DMA_CONTROL_EFC; + pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n", + rxfifosz); + } else { + csr6 |= DMA_CONTROL_EFC; + csr6 |= RFA_FULL_MINUS_1K; + csr6 |= RFD_FULL_MINUS_2K; + } + return csr6; +} + +static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + csr6 |= DMA_CONTROL_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); + csr6 &= ~DMA_CONTROL_RSF; + csr6 &= DMA_CONTROL_TC_RX_MASK; + if (mode <= 32) + csr6 |= DMA_CONTROL_RTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_RTC_64; + else if (mode <= 96) + csr6 |= DMA_CONTROL_RTC_96; + else + csr6 |= DMA_CONTROL_RTC_128; + } + + /* Configure flow control based on rx fifo size */ + csr6 = dwmac1000_configure_fc(csr6, fifosz); + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. */ + csr6 |= DMA_CONTROL_TSF; + /* Operating on second frame increase the performance + * especially when transmit store-and-forward is used. + */ + csr6 |= DMA_CONTROL_OSF; + } else { + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); + csr6 &= ~DMA_CONTROL_TSF; + csr6 &= DMA_CONTROL_TC_TX_MASK; + /* Set the transmit threshold */ + if (mode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else if (mode <= 128) + csr6 |= DMA_CONTROL_TTC_128; + else if (mode <= 192) + csr6 |= DMA_CONTROL_TTC_192; + else + csr6 |= DMA_CONTROL_TTC_256; + } + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++) + if ((i < 12) || (i > 17)) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); +} + +static int dwmac1000_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE); + + if (!hw_cap) { + /* 0x00000000 is the value read on old hardware that does not + * implement this register + */ + return -EOPNOTSUPP; + } + + dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); + dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; + dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; + dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; + dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5; + dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; + dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; + dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; + dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; + /* MMC */ + dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; + /* IEEE 1588-2002 */ + dma_cap->time_stamp = + (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; + /* IEEE 1588-2008 */ + dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; + dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; + /* TX and RX csum */ + dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; + dma_cap->rx_coe_type1 = (hw_cap & 
DMA_HW_FEAT_RXTYP1COE) >> 17; + dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; + dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; + /* TX and RX number of channels */ + dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; + dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; + /* Alternate (enhanced) DESC mode */ + dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; + + return 0; +} + +static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, + u32 queue) +{ + writel(riwt, ioaddr + DMA_RX_WATCHDOG); +} + +const struct stmmac_dma_ops dwmac1000_dma_ops = { + .reset = dwmac_dma_reset, + .init = dwmac1000_dma_init, + .init_rx_chan = dwmac1000_dma_init_rx, + .init_tx_chan = dwmac1000_dma_init_tx, + .axi = dwmac1000_dma_axi, + .dump_regs = dwmac1000_dump_dma_regs, + .dma_rx_mode = dwmac1000_dma_operation_mode_rx, + .dma_tx_mode = dwmac1000_dma_operation_mode_tx, + .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_irq = dwmac_enable_dma_irq, + .disable_dma_irq = dwmac_disable_dma_irq, + .start_tx = dwmac_dma_start_tx, + .stop_tx = dwmac_dma_stop_tx, + .start_rx = dwmac_dma_start_rx, + .stop_rx = dwmac_dma_stop_rx, + .dma_interrupt = dwmac_dma_interrupt, + .get_hw_feature = dwmac1000_get_hw_feature, + .rx_watchdog = dwmac1000_rx_watchdog, +}; diff --git a/devices/stmmac/dwmac100_core-6.1-ethercat.c b/devices/stmmac/dwmac100_core-6.1-ethercat.c new file mode 100644 index 00000000..16d7f571 --- /dev/null +++ b/devices/stmmac/dwmac100_core-6.1-ethercat.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This only implements the mac core functions for this chip. 
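dwmac1000_get_hw_feature() above treats an all-zero DMA_HW_FEATURE word as meaning the register is not implemented and otherwise unpacks one capability per bit field. A user-space sketch that decodes a few representative fields, using only the bit positions implied by the shifts in that function:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode a few representative fields of a DMA_HW_FEATURE snapshot, using the
 * bit positions implied by the shifts in dwmac1000_get_hw_feature(). */
static void decode_hw_feature(uint32_t hw_cap)
{
    if (!hw_cap) {
        /* All-zero means the register is not implemented (old cores). */
        printf("DMA_HW_FEATURE not implemented\n");
        return;
    }

    printf("10/100: %u, 1000: %u, EEE: %u, TX COE: %u, enhanced desc: %u\n",
           hw_cap & 1,          /* DMA_HW_FEAT_MIISEL */
           (hw_cap >> 1) & 1,   /* DMA_HW_FEAT_GMIISEL */
           (hw_cap >> 14) & 1,  /* DMA_HW_FEAT_EEESEL */
           (hw_cap >> 16) & 1,  /* DMA_HW_FEAT_TXCOESEL */
           (hw_cap >> 24) & 1); /* DMA_HW_FEAT_ENHDESSEL */
    printf("RX channel field: %u, TX channel field: %u\n",
           (hw_cap >> 20) & 3,  /* DMA_HW_FEAT_RXCHCNT */
           (hw_cap >> 22) & 3); /* DMA_HW_FEAT_TXCHCNT */
}

int main(void)
{
    decode_hw_feature(0x01054a33);  /* arbitrary example value */
    return 0;
}
```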
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "stmmac-6.1-ethercat.h" +#include "dwmac100-6.1-ethercat.h" + +static void dwmac100_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MAC_CONTROL); + + value |= MAC_CORE_INIT; + + writel(value, ioaddr + MAC_CONTROL); + +#ifdef STMMAC_VLAN_TAG_USED + writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); +#endif +} + +static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + + reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL); + reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH); + reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW); + reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH); + reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW); + reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL); + reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1); + reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2); +} + +static int dwmac100_rx_ipc_enable(struct mac_device_info *hw) +{ + return 0; +} + +static int dwmac100_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + return 0; +} + +static void dwmac100_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 value = readl(ioaddr + MAC_CONTROL); + + if (dev->flags & IFF_PROMISC) { + value |= MAC_CONTROL_PR; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | + MAC_CONTROL_HP); + } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value |= MAC_CONTROL_PM; + value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); + writel(0xffffffff, ioaddr + MAC_HASH_HIGH); + writel(0xffffffff, ioaddr + MAC_HASH_LOW); + } else if (netdev_mc_empty(dev)) { /* no multicast */ + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | + MAC_CONTROL_HO | MAC_CONTROL_HP); + } else { + u32 mc_filter[2]; + struct netdev_hw_addr *ha; + + /* Perfect filter mode for physical address and Hash + * filter for multicast + */ + value |= MAC_CONTROL_HP; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | + MAC_CONTROL_IF | MAC_CONTROL_HO); + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The upper 6 bits of the calculated CRC are used to + * index the contens of the hash table + */ + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. 
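The hash indexing just described (CRC of the multicast address, top 6 bits select the filter bit) can be reproduced offline. A minimal sketch, assuming the kernel's ether_crc() behaves as bitrev32() of the reflected CRC-32 (polynomial 0xEDB88320) seeded with ~0; the most significant of the 6 bits then picks MAC_HASH_HIGH or MAC_HASH_LOW, the remaining 5 bits the bit within it:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise reflected CRC-32 (poly 0xEDB88320) seeded with ~0, standing in for
 * the kernel's crc32_le(~0, ...) -- an assumption of this sketch. */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
    }
    return crc;
}

static uint32_t bitrev32(uint32_t x)
{
    uint32_t r = 0;

    for (int i = 0; i < 32; i++)
        r = (r << 1) | ((x >> i) & 1);
    return r;
}

int main(void)
{
    const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* example multicast MAC */
    uint32_t mc_filter[2] = { 0, 0 };   /* MAC_HASH_LOW, MAC_HASH_HIGH */

    /* Upper 6 bits of the CRC pick the hash bit, as in dwmac100_set_filter(). */
    int bit_nr = bitrev32(crc32_le_bitwise(~0u, addr, 6)) >> 26;

    mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
    printf("bit %d -> %s, bit %d\n", bit_nr,
           (bit_nr >> 5) ? "MAC_HASH_HIGH" : "MAC_HASH_LOW", bit_nr & 31);
    return 0;
}
```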
+ */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + MAC_HASH_LOW); + writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); + } + + writel(value, ioaddr + MAC_CONTROL); +} + +static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int flow = MAC_FLOW_CTRL_ENABLE; + + if (duplex) + flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); + writel(flow, ioaddr + MAC_FLOW_CTRL); +} + +/* No PMT module supported on ST boards with this Eth chip. */ +static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode) +{ + return; +} + +static void dwmac100_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + MAC_CONTROL); + + if (enable) + value |= MAC_CONTROL_OM; + else + value &= ~MAC_CONTROL_OM; + + writel(value, ioaddr + MAC_CONTROL); +} + +const struct stmmac_ops dwmac100_ops = { + .core_init = dwmac100_core_init, + .set_mac = stmmac_set_mac, + .rx_ipc = dwmac100_rx_ipc_enable, + .dump_regs = dwmac100_dump_mac_regs, + .host_irq_status = dwmac100_irq_status, + .set_filter = dwmac100_set_filter, + .flow_ctrl = dwmac100_flow_ctrl, + .pmt = dwmac100_pmt, + .set_umac_addr = dwmac100_set_umac_addr, + .get_umac_addr = dwmac100_get_umac_addr, + .set_mac_loopback = dwmac100_set_mac_loopback, +}; + +int dwmac100_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tDWMAC100\n"); + + mac->pcsr = priv->ioaddr; + mac->link.duplex = MAC_CONTROL_F; + mac->link.speed10 = 0; + mac->link.speed100 = 0; + mac->link.speed1000 = 0; + mac->link.speed_mask = MAC_CONTROL_PS; + mac->mii.addr = MAC_MII_ADDR; + mac->mii.data = MAC_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + + return 0; +} diff --git a/devices/stmmac/dwmac100_core-6.1-orig.c b/devices/stmmac/dwmac100_core-6.1-orig.c new file mode 100644 index 00000000..a6e8d7bd --- /dev/null +++ b/devices/stmmac/dwmac100_core-6.1-orig.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This only implements the mac core functions for this chip. 
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "stmmac.h" +#include "dwmac100.h" + +static void dwmac100_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MAC_CONTROL); + + value |= MAC_CORE_INIT; + + writel(value, ioaddr + MAC_CONTROL); + +#ifdef STMMAC_VLAN_TAG_USED + writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); +#endif +} + +static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + + reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL); + reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH); + reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW); + reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH); + reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW); + reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL); + reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1); + reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2); +} + +static int dwmac100_rx_ipc_enable(struct mac_device_info *hw) +{ + return 0; +} + +static int dwmac100_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + return 0; +} + +static void dwmac100_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 value = readl(ioaddr + MAC_CONTROL); + + if (dev->flags & IFF_PROMISC) { + value |= MAC_CONTROL_PR; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | + MAC_CONTROL_HP); + } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value |= MAC_CONTROL_PM; + value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); + writel(0xffffffff, ioaddr + MAC_HASH_HIGH); + writel(0xffffffff, ioaddr + MAC_HASH_LOW); + } else if (netdev_mc_empty(dev)) { /* no multicast */ + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | + MAC_CONTROL_HO | MAC_CONTROL_HP); + } else { + u32 mc_filter[2]; + struct netdev_hw_addr *ha; + + /* Perfect filter mode for physical address and Hash + * filter for multicast + */ + value |= MAC_CONTROL_HP; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | + MAC_CONTROL_IF | MAC_CONTROL_HO); + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The upper 6 bits of the calculated CRC are used to + * index the contens of the hash table + */ + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. 
+ */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + MAC_HASH_LOW); + writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); + } + + writel(value, ioaddr + MAC_CONTROL); +} + +static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int flow = MAC_FLOW_CTRL_ENABLE; + + if (duplex) + flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); + writel(flow, ioaddr + MAC_FLOW_CTRL); +} + +/* No PMT module supported on ST boards with this Eth chip. */ +static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode) +{ + return; +} + +static void dwmac100_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + MAC_CONTROL); + + if (enable) + value |= MAC_CONTROL_OM; + else + value &= ~MAC_CONTROL_OM; + + writel(value, ioaddr + MAC_CONTROL); +} + +const struct stmmac_ops dwmac100_ops = { + .core_init = dwmac100_core_init, + .set_mac = stmmac_set_mac, + .rx_ipc = dwmac100_rx_ipc_enable, + .dump_regs = dwmac100_dump_mac_regs, + .host_irq_status = dwmac100_irq_status, + .set_filter = dwmac100_set_filter, + .flow_ctrl = dwmac100_flow_ctrl, + .pmt = dwmac100_pmt, + .set_umac_addr = dwmac100_set_umac_addr, + .get_umac_addr = dwmac100_get_umac_addr, + .set_mac_loopback = dwmac100_set_mac_loopback, +}; + +int dwmac100_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tDWMAC100\n"); + + mac->pcsr = priv->ioaddr; + mac->link.duplex = MAC_CONTROL_F; + mac->link.speed10 = 0; + mac->link.speed100 = 0; + mac->link.speed1000 = 0; + mac->link.speed_mask = MAC_CONTROL_PS; + mac->mii.addr = MAC_MII_ADDR; + mac->mii.data = MAC_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + + return 0; +} diff --git a/devices/stmmac/dwmac100_dma-6.1-ethercat.c b/devices/stmmac/dwmac100_dma-6.1-ethercat.c new file mode 100644 index 00000000..d669b8a5 --- /dev/null +++ b/devices/stmmac/dwmac100_dma-6.1-ethercat.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This contains the functions to handle the dma. 
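The mii.* shift and mask values assigned in dwmac100_setup() above describe how the MDIO address register is composed: PHY address in bits 15..11, register number in bits 10..6 and the CSR clock selection in bits 5..2. A sketch of that composition; the busy and write command bits are illustrative placeholders:

```c
#include <stdint.h>
#include <stdio.h>

/* Field layout taken from the mii.* values in dwmac100_setup() and
 * dwmac1000_setup(); MII_BUSY/MII_WRITE are placeholder command bits. */
#define MII_ADDR_SHIFT  11
#define MII_ADDR_MASK   0x0000F800u
#define MII_REG_SHIFT   6
#define MII_REG_MASK    0x000007C0u
#define MII_CSR_SHIFT   2
#define MII_CSR_MASK    0x0000003Cu
#define MII_BUSY        (1u << 0)   /* placeholder */
#define MII_WRITE       (1u << 1)   /* placeholder */

/* Compose the MDIO address register value for one PHY access. */
static uint32_t mii_address(int phyaddr, int phyreg, int clk_csr, int write)
{
    uint32_t v = 0;

    v |= ((uint32_t)phyaddr << MII_ADDR_SHIFT) & MII_ADDR_MASK;
    v |= ((uint32_t)phyreg << MII_REG_SHIFT) & MII_REG_MASK;
    v |= ((uint32_t)clk_csr << MII_CSR_SHIFT) & MII_CSR_MASK;
    if (write)
        v |= MII_WRITE;
    return v | MII_BUSY;
}

int main(void)
{
    /* Read register 2 of PHY 1 with clock selection 4. */
    printf("MAC_MII_ADDR = 0x%08x\n", mii_address(1, 2, 4, 0));
    return 0;
}
```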
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "dwmac100-6.1-ethercat.h" +#include "dwmac_dma-6.1-ethercat.h" + +static void dwmac100_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + /* Enable Application Access by writing to DMA CSR0 */ + writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), + ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} + +static void dwmac100_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_rx_phy, u32 chan) +{ + /* RX descriptor base addr lists must be written into DMA CSR3 */ + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR); +} + +static void dwmac100_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_tx_phy, u32 chan) +{ + /* TX descriptor base addr lists must be written into DMA CSR4 */ + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR); +} + +/* Store and Forward capability is not used at all. + * + * The transmit threshold can be programmed by setting the TTC bits in the DMA + * control register. + */ +static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else + csr6 |= DMA_CONTROL_TTC_128; + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); + + reg_space[DMA_CUR_TX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_TX_BUF_ADDR); + reg_space[DMA_CUR_RX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_RX_BUF_ADDR); +} + +/* DMA controller has two counters to track the number of the missed frames. 
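dwmac100_dma_diagnostic_fr(), which follows, splits that CSR8 snapshot into a FIFO-overflow counter (with its own overflow flag) and a missed-frame counter, saturating each running total when the hardware counter has wrapped. A standalone sketch of the same accounting; the mask values here are placeholders standing in for the DMA_MISSED_FRAME_* definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder field definitions for DMA CSR8; the real DMA_MISSED_FRAME_*
 * masks live in the driver's dwmac_dma header. */
#define MISSED_FRAME_CNTR_MASK   0x0000FFFFu  /* frames missed by the host */
#define MISSED_FRAME_CNTR_OVF    0x00010000u  /* that counter overflowed */
#define FIFO_OVERFLOW_CNTR_MASK  0x0FFE0000u  /* frames dropped on FIFO overflow */
#define FIFO_OVERFLOW_CNTR_SHIFT 17
#define FIFO_OVERFLOW_CNTR_OVF   0x10000000u

/* Fold one CSR8 snapshot into two running totals, following the same
 * saturate-on-overflow logic as dwmac100_dma_diagnostic_fr(). */
static void account_missed_frames(uint32_t csr8,
                                  unsigned long *rx_over, unsigned long *rx_missed)
{
    if (!csr8)
        return;

    if (csr8 & FIFO_OVERFLOW_CNTR_OVF)
        *rx_over += 0x800;  /* counter wrapped: add its full range */
    else
        *rx_over += (csr8 & FIFO_OVERFLOW_CNTR_MASK) >> FIFO_OVERFLOW_CNTR_SHIFT;

    if (csr8 & MISSED_FRAME_CNTR_OVF)
        *rx_missed += 0xffff;
    else
        *rx_missed += csr8 & MISSED_FRAME_CNTR_MASK;
}

int main(void)
{
    unsigned long over = 0, missed = 0;

    account_missed_frames(0x00020005u, &over, &missed); /* example snapshot */
    printf("rx_over_errors=%lu rx_missed_errors=%lu\n", over, missed);
    return 0;
}
```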
*/ +static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); + + if (unlikely(csr8)) { + if (csr8 & DMA_MISSED_FRAME_OVE) { + stats->rx_over_errors += 0x800; + x->rx_overflow_cntr += 0x800; + } else { + unsigned int ove_cntr; + ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); + stats->rx_over_errors += ove_cntr; + x->rx_overflow_cntr += ove_cntr; + } + + if (csr8 & DMA_MISSED_FRAME_OVE_M) { + stats->rx_missed_errors += 0xffff; + x->rx_missed_cntr += 0xffff; + } else { + unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); + stats->rx_missed_errors += miss_f; + x->rx_missed_cntr += miss_f; + } + } +} + +const struct stmmac_dma_ops dwmac100_dma_ops = { + .reset = dwmac_dma_reset, + .init = dwmac100_dma_init, + .init_rx_chan = dwmac100_dma_init_rx, + .init_tx_chan = dwmac100_dma_init_tx, + .dump_regs = dwmac100_dump_dma_regs, + .dma_tx_mode = dwmac100_dma_operation_mode_tx, + .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, + .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_irq = dwmac_enable_dma_irq, + .disable_dma_irq = dwmac_disable_dma_irq, + .start_tx = dwmac_dma_start_tx, + .stop_tx = dwmac_dma_stop_tx, + .start_rx = dwmac_dma_start_rx, + .stop_rx = dwmac_dma_stop_rx, + .dma_interrupt = dwmac_dma_interrupt, +}; diff --git a/devices/stmmac/dwmac100_dma-6.1-orig.c b/devices/stmmac/dwmac100_dma-6.1-orig.c new file mode 100644 index 00000000..8f0d9bc7 --- /dev/null +++ b/devices/stmmac/dwmac100_dma-6.1-orig.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This contains the functions to handle the dma. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "dwmac100.h" +#include "dwmac_dma.h" + +static void dwmac100_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + /* Enable Application Access by writing to DMA CSR0 */ + writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), + ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} + +static void dwmac100_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_rx_phy, u32 chan) +{ + /* RX descriptor base addr lists must be written into DMA CSR3 */ + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR); +} + +static void dwmac100_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_tx_phy, u32 chan) +{ + /* TX descriptor base addr lists must be written into DMA CSR4 */ + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR); +} + +/* Store and Forward capability is not used at all. + * + * The transmit threshold can be programmed by setting the TTC bits in the DMA + * control register. 
+ */ +static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else + csr6 |= DMA_CONTROL_TTC_128; + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++) + reg_space[DMA_BUS_MODE / 4 + i] = + readl(ioaddr + DMA_BUS_MODE + i * 4); + + reg_space[DMA_CUR_TX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_TX_BUF_ADDR); + reg_space[DMA_CUR_RX_BUF_ADDR / 4] = + readl(ioaddr + DMA_CUR_RX_BUF_ADDR); +} + +/* DMA controller has two counters to track the number of the missed frames. */ +static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); + + if (unlikely(csr8)) { + if (csr8 & DMA_MISSED_FRAME_OVE) { + stats->rx_over_errors += 0x800; + x->rx_overflow_cntr += 0x800; + } else { + unsigned int ove_cntr; + ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); + stats->rx_over_errors += ove_cntr; + x->rx_overflow_cntr += ove_cntr; + } + + if (csr8 & DMA_MISSED_FRAME_OVE_M) { + stats->rx_missed_errors += 0xffff; + x->rx_missed_cntr += 0xffff; + } else { + unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); + stats->rx_missed_errors += miss_f; + x->rx_missed_cntr += miss_f; + } + } +} + +const struct stmmac_dma_ops dwmac100_dma_ops = { + .reset = dwmac_dma_reset, + .init = dwmac100_dma_init, + .init_rx_chan = dwmac100_dma_init_rx, + .init_tx_chan = dwmac100_dma_init_tx, + .dump_regs = dwmac100_dump_dma_regs, + .dma_tx_mode = dwmac100_dma_operation_mode_tx, + .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, + .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_irq = dwmac_enable_dma_irq, + .disable_dma_irq = dwmac_disable_dma_irq, + .start_tx = dwmac_dma_start_tx, + .stop_tx = dwmac_dma_stop_tx, + .start_rx = dwmac_dma_start_rx, + .stop_rx = dwmac_dma_stop_rx, + .dma_interrupt = dwmac_dma_interrupt, +}; diff --git a/devices/stmmac/dwmac4-6.1-ethercat.h b/devices/stmmac/dwmac4-6.1-ethercat.h new file mode 100644 index 00000000..3a4ffd7d --- /dev/null +++ b/devices/stmmac/dwmac4-6.1-ethercat.h @@ -0,0 +1,520 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DWMAC4 Header file. 
+ * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_H__ +#define __DWMAC4_H__ + +#include "common-6.1-ethercat.h" + +/* MAC registers */ +#define GMAC_CONFIG 0x00000000 +#define GMAC_EXT_CONFIG 0x00000004 +#define GMAC_PACKET_FILTER 0x00000008 +#define GMAC_HASH_TAB(x) (0x10 + (x) * 4) +#define GMAC_VLAN_TAG 0x00000050 +#define GMAC_VLAN_TAG_DATA 0x00000054 +#define GMAC_VLAN_HASH_TABLE 0x00000058 +#define GMAC_RX_FLOW_CTRL 0x00000090 +#define GMAC_VLAN_INCL 0x00000060 +#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_TXQ_PRTY_MAP0 0x98 +#define GMAC_TXQ_PRTY_MAP1 0x9C +#define GMAC_RXQ_CTRL0 0x000000a0 +#define GMAC_RXQ_CTRL1 0x000000a4 +#define GMAC_RXQ_CTRL2 0x000000a8 +#define GMAC_RXQ_CTRL3 0x000000ac +#define GMAC_INT_STATUS 0x000000b0 +#define GMAC_INT_EN 0x000000b4 +#define GMAC_1US_TIC_COUNTER 0x000000dc +#define GMAC_PCS_BASE 0x000000e0 +#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 +#define GMAC_PMT 0x000000c0 +#define GMAC_DEBUG 0x00000114 +#define GMAC_HW_FEATURE0 0x0000011c +#define GMAC_HW_FEATURE1 0x00000120 +#define GMAC_HW_FEATURE2 0x00000124 +#define GMAC_HW_FEATURE3 0x00000128 +#define GMAC_MDIO_ADDR 0x00000200 +#define GMAC_MDIO_DATA 0x00000204 +#define GMAC_GPIO_STATUS 0x0000020C +#define GMAC_ARP_ADDR 0x00000210 +#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) +#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) +#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30) +#define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30) +#define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30) +#define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30) +#define GMAC_TIMESTAMP_STATUS 0x00000b20 + +/* RX Queues Routing */ +#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) +#define GMAC_RXQCTRL_AVCPQ_SHIFT 0 +#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4) +#define GMAC_RXQCTRL_PTPQ_SHIFT 4 +#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8) +#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8 +#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12) +#define GMAC_RXQCTRL_UPQ_SHIFT 12 +#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16) +#define GMAC_RXQCTRL_MCBCQ_SHIFT 16 +#define GMAC_RXQCTRL_MCBCQEN BIT(20) +#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20 +#define GMAC_RXQCTRL_TACPQE BIT(21) +#define GMAC_RXQCTRL_TACPQE_SHIFT 21 +#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24) +#define GMAC_RXQCTRL_FPRQ_SHIFT 24 + +/* MAC Packet Filtering */ +#define GMAC_PACKET_FILTER_PR BIT(0) +#define GMAC_PACKET_FILTER_HMC BIT(2) +#define GMAC_PACKET_FILTER_PM BIT(4) +#define GMAC_PACKET_FILTER_PCF BIT(7) +#define GMAC_PACKET_FILTER_HPF BIT(10) +#define GMAC_PACKET_FILTER_VTFE BIT(16) +#define GMAC_PACKET_FILTER_IPFE BIT(20) +#define GMAC_PACKET_FILTER_RA BIT(31) + +#define GMAC_MAX_PERFECT_ADDRESSES 128 + +/* MAC VLAN */ +#define GMAC_VLAN_EDVLP BIT(26) +#define GMAC_VLAN_VTHM BIT(25) +#define GMAC_VLAN_DOVLTC BIT(20) +#define GMAC_VLAN_ESVL BIT(18) +#define GMAC_VLAN_ETV BIT(16) +#define GMAC_VLAN_VID GENMASK(15, 0) +#define GMAC_VLAN_VLTI BIT(20) +#define GMAC_VLAN_CSVL BIT(19) +#define GMAC_VLAN_VLC GENMASK(17, 16) +#define GMAC_VLAN_VLC_SHIFT 16 +#define GMAC_VLAN_VLHT GENMASK(15, 0) + +/* MAC VLAN Tag */ +#define GMAC_VLAN_TAG_VID GENMASK(15, 0) +#define GMAC_VLAN_TAG_ETV BIT(16) + +/* MAC VLAN Tag Control */ +#define GMAC_VLAN_TAG_CTRL_OB BIT(0) +#define GMAC_VLAN_TAG_CTRL_CT BIT(1) +#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2) +#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2 +#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21) +#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21 +#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24) + 
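The EVLS field declared above (GENMASK(22, 21) with shift 21) is programmed like every other multi-bit field in these registers: clear the mask, then OR in the new value shifted into place. A minimal read-modify-write sketch using the strip-mode encodings (0x0..0x3) that follow in this header:

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK() on 32-bit values. */
#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

#define VLAN_TAG_CTRL_EVLS_MASK   GENMASK(22, 21)
#define VLAN_TAG_CTRL_EVLS_SHIFT  21
#define VLAN_TAG_STRIP_ALL        0x3   /* raw field value: strip tags on all frames */

/* Replace the 2-bit EVLS strip-mode field in a VLAN tag control snapshot. */
static uint32_t set_vlan_strip_mode(uint32_t val, uint32_t mode)
{
    val &= ~VLAN_TAG_CTRL_EVLS_MASK;
    val |= (mode << VLAN_TAG_CTRL_EVLS_SHIFT) & VLAN_TAG_CTRL_EVLS_MASK;
    return val;
}

int main(void)
{
    uint32_t v = set_vlan_strip_mode(0, VLAN_TAG_STRIP_ALL);

    printf("GMAC_VLAN_TAG = 0x%08x\n", v);  /* prints 0x00600000 */
    return 0;
}
```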
+#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) + +/* MAC VLAN Tag Data/Filter */ +#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0) +#define GMAC_VLAN_TAG_DATA_VEN BIT(16) +#define GMAC_VLAN_TAG_DATA_ETV BIT(17) + +/* MAC RX Queue Enable */ +#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) +#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) +#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1) + +/* MAC Flow Control RX */ +#define GMAC_RX_FLOW_CTRL_RFE BIT(0) + +/* RX Queues Priorities */ +#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8) + +/* TX Queues Priorities */ +#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8) + +/* MAC Flow Control TX */ +#define GMAC_TX_FLOW_CTRL_TFE BIT(1) +#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 + +/* MAC Interrupt bitmap*/ +#define GMAC_INT_RGSMIIS BIT(0) +#define GMAC_INT_PCS_LINK BIT(1) +#define GMAC_INT_PCS_ANE BIT(2) +#define GMAC_INT_PCS_PHYIS BIT(3) +#define GMAC_INT_PMT_EN BIT(4) +#define GMAC_INT_LPI_EN BIT(5) +#define GMAC_INT_TSIE BIT(12) + +#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ + GMAC_INT_PCS_ANE) + +#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \ + GMAC_INT_TSIE) + +enum dwmac4_irq_status { + time_stamp_irq = 0x00001000, + mmc_rx_csum_offload_irq = 0x00000800, + mmc_tx_irq = 0x00000400, + mmc_rx_irq = 0x00000200, + mmc_irq = 0x00000100, + lpi_irq = 0x00000020, + pmt_irq = 0x00000010, +}; + +/* MAC PMT bitmap */ +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* Energy Efficient Ethernet (EEE) for GMAC4 + * + * LPI status, timer and control register offset + */ +#define GMAC4_LPI_CTRL_STATUS 0xd0 +#define GMAC4_LPI_TIMER_CTRL 0xd4 +#define GMAC4_LPI_ENTRY_TIMER 0xd8 +#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc + +/* LPI control and status defines */ +#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ +#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */ +#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ +#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ +#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */ + +/* MAC Debug bitmap */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +#define GMAC_DEBUG_RPESTS BIT(0) + +/* MAC config */ +#define GMAC_CONFIG_ARPEN BIT(31) +#define GMAC_CONFIG_SARC GENMASK(30, 28) +#define GMAC_CONFIG_SARC_SHIFT 28 +#define 
GMAC_CONFIG_IPC BIT(27) +#define GMAC_CONFIG_IPG GENMASK(26, 24) +#define GMAC_CONFIG_IPG_SHIFT 24 +#define GMAC_CONFIG_2K BIT(22) +#define GMAC_CONFIG_ACS BIT(20) +#define GMAC_CONFIG_BE BIT(18) +#define GMAC_CONFIG_JD BIT(17) +#define GMAC_CONFIG_JE BIT(16) +#define GMAC_CONFIG_PS BIT(15) +#define GMAC_CONFIG_FES BIT(14) +#define GMAC_CONFIG_FES_SHIFT 14 +#define GMAC_CONFIG_DM BIT(13) +#define GMAC_CONFIG_LM BIT(12) +#define GMAC_CONFIG_DCRS BIT(9) +#define GMAC_CONFIG_TE BIT(1) +#define GMAC_CONFIG_RE BIT(0) + +/* MAC extended config */ +#define GMAC_CONFIG_EIPG GENMASK(29, 25) +#define GMAC_CONFIG_EIPG_SHIFT 25 +#define GMAC_CONFIG_EIPG_EN BIT(24) +#define GMAC_CONFIG_HDSMS GENMASK(22, 20) +#define GMAC_CONFIG_HDSMS_SHIFT 20 +#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT) + +/* MAC HW features0 bitmap */ +#define GMAC_HW_FEAT_SAVLANINS BIT(27) +#define GMAC_HW_FEAT_ADDMAC BIT(18) +#define GMAC_HW_FEAT_RXCOESEL BIT(16) +#define GMAC_HW_FEAT_TXCOSEL BIT(14) +#define GMAC_HW_FEAT_EEESEL BIT(13) +#define GMAC_HW_FEAT_TSSEL BIT(12) +#define GMAC_HW_FEAT_ARPOFFSEL BIT(9) +#define GMAC_HW_FEAT_MMCSEL BIT(8) +#define GMAC_HW_FEAT_MGKSEL BIT(7) +#define GMAC_HW_FEAT_RWKSEL BIT(6) +#define GMAC_HW_FEAT_SMASEL BIT(5) +#define GMAC_HW_FEAT_VLHASH BIT(4) +#define GMAC_HW_FEAT_PCSSEL BIT(3) +#define GMAC_HW_FEAT_HDSEL BIT(2) +#define GMAC_HW_FEAT_GMIISEL BIT(1) +#define GMAC_HW_FEAT_MIISEL BIT(0) + +/* MAC HW features1 bitmap */ +#define GMAC_HW_FEAT_L3L4FNUM GENMASK(30, 27) +#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24) +#define GMAC_HW_FEAT_AVSEL BIT(20) +#define GMAC_HW_TSOEN BIT(18) +#define GMAC_HW_FEAT_SPHEN BIT(17) +#define GMAC_HW_ADDR64 GENMASK(15, 14) +#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6) +#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) + +/* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28) +#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) +#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) +#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) +#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) +#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0) + +/* MAC HW features3 bitmap */ +#define GMAC_HW_FEAT_ASP GENMASK(29, 28) +#define GMAC_HW_FEAT_TBSSEL BIT(27) +#define GMAC_HW_FEAT_FPESEL BIT(26) +#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20) +#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17) +#define GMAC_HW_FEAT_ESTSEL BIT(16) +#define GMAC_HW_FEAT_FRPES GENMASK(14, 13) +#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11) +#define GMAC_HW_FEAT_FRPSEL BIT(10) +#define GMAC_HW_FEAT_DVLAN BIT(5) +#define GMAC_HW_FEAT_NRVF GENMASK(2, 0) + +/* GMAC GPIO Status reg */ +#define GMAC_GPO0 BIT(16) +#define GMAC_GPO1 BIT(17) +#define GMAC_GPO2 BIT(18) +#define GMAC_GPO3 BIT(19) + +/* MAC HW ADDR regs */ +#define GMAC_HI_DCS GENMASK(18, 16) +#define GMAC_HI_DCS_SHIFT 16 +#define GMAC_HI_REG_AE BIT(31) + +/* L3/L4 Filters regs */ +#define GMAC_L4DPIM0 BIT(21) +#define GMAC_L4DPM0 BIT(20) +#define GMAC_L4SPIM0 BIT(19) +#define GMAC_L4SPM0 BIT(18) +#define GMAC_L4PEN0 BIT(16) +#define GMAC_L3DAIM0 BIT(5) +#define GMAC_L3DAM0 BIT(4) +#define GMAC_L3SAIM0 BIT(3) +#define GMAC_L3SAM0 BIT(2) +#define GMAC_L3PEN0 BIT(0) +#define GMAC_L4DP0 GENMASK(31, 16) +#define GMAC_L4DP0_SHIFT 16 +#define GMAC_L4SP0 GENMASK(15, 0) + +/* MAC Timestamp Status */ +#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2) +#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define GMAC_TIMESTAMP_ATSNS_SHIFT 25 + +/* MTL registers */ +#define MTL_OPERATION_MODE 0x00000c00 +#define MTL_FRPE BIT(15) +#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5) +#define 
MTL_OPERATION_SCHALG_WRR (0x0 << 5) +#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5) +#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5) +#define MTL_OPERATION_SCHALG_SP (0x3 << 5) +#define MTL_OPERATION_RAA BIT(2) +#define MTL_OPERATION_RAA_SP (0x0 << 2) +#define MTL_OPERATION_RAA_WSP (0x1 << 2) + +#define MTL_INT_STATUS 0x00000c20 +#define MTL_INT_QX(x) BIT(x) + +#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */ +#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */ +#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0) +#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0) +#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x)) +#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q))) + +#define MTL_CHAN_BASE_ADDR 0x00000d00 +#define MTL_CHAN_BASE_OFFSET 0x40 +#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \ + (x * MTL_CHAN_BASE_OFFSET)) + +#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x) +#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8) +#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c) +#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30) +#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38) + +#define MTL_OP_MODE_RSF BIT(5) +#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2) +#define MTL_OP_MODE_TXQEN_AV BIT(2) +#define MTL_OP_MODE_TXQEN BIT(3) +#define MTL_OP_MODE_TSF BIT(1) + +#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16) +#define MTL_OP_MODE_TQS_SHIFT 16 + +#define MTL_OP_MODE_TTC_MASK 0x70 +#define MTL_OP_MODE_TTC_SHIFT 4 + +#define MTL_OP_MODE_TTC_32 0 +#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT) + +#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20) +#define MTL_OP_MODE_RQS_SHIFT 20 + +#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14) +#define MTL_OP_MODE_RFD_SHIFT 14 + +#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8) +#define MTL_OP_MODE_RFA_SHIFT 8 + +#define MTL_OP_MODE_EHFC BIT(7) + +#define MTL_OP_MODE_RTC_MASK 0x18 +#define MTL_OP_MODE_RTC_SHIFT 3 + +#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT) +#define MTL_OP_MODE_RTC_64 0 +#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT) +#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT) + +/* MTL ETS Control register */ +#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10 +#define MTL_ETS_CTRL_BASE_OFFSET 0x40 +#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \ + ((x) * MTL_ETS_CTRL_BASE_OFFSET)) + +#define MTL_ETS_CTRL_CC BIT(3) +#define MTL_ETS_CTRL_AVALG BIT(2) + +/* MTL Queue Quantum Weight */ +#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18 +#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40 +#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \ + ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET)) +#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0) + +/* MTL sendSlopeCredit register */ +#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c +#define MTL_SEND_SLP_CRED_OFFSET 0x40 +#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \ + ((x) * MTL_SEND_SLP_CRED_OFFSET)) + +#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0) + +/* MTL hiCredit register */ +#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20 +#define MTL_HIGH_CRED_OFFSET 0x40 +#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \ + ((x) * MTL_HIGH_CRED_OFFSET)) + 
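All of the per-queue MTL blocks above follow the same addressing scheme: a fixed base plus 0x40 per queue, with constant offsets for the TX/RX operation-mode and interrupt-control registers. A short sketch computing those addresses from the constants visible in this header:

```c
#include <stdint.h>
#include <stdio.h>

#define MTL_CHAN_BASE_ADDR    0x00000d00u
#define MTL_CHAN_BASE_OFFSET  0x40u

/* Base of the MTL channel block for queue q, as MTL_CHANX_BASE_ADDR(x) does. */
static uint32_t mtl_chan_base(unsigned int q)
{
    return MTL_CHAN_BASE_ADDR + q * MTL_CHAN_BASE_OFFSET;
}

int main(void)
{
    for (unsigned int q = 0; q < 4; q++)
        printf("queue %u: TX_OP_MODE=0x%03x INT_CTRL=0x%03x RX_OP_MODE=0x%03x\n",
               q, mtl_chan_base(q),          /* MTL_CHAN_TX_OP_MODE(q) */
               mtl_chan_base(q) + 0x2c,      /* MTL_CHAN_INT_CTRL(q)   */
               mtl_chan_base(q) + 0x30);     /* MTL_CHAN_RX_OP_MODE(q) */
    return 0;
}
```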
+#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0) + +/* MTL loCredit register */ +#define MTL_LOW_CRED_BASE_ADDR 0x00000d24 +#define MTL_LOW_CRED_OFFSET 0x40 +#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \ + ((x) * MTL_LOW_CRED_OFFSET)) + +#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0) + +/* MTL debug */ +#define MTL_DEBUG_TXSTSFSTS BIT(5) +#define MTL_DEBUG_TXFSTS BIT(4) +#define MTL_DEBUG_TWCSTS BIT(3) + +/* MTL debug: Tx FIFO Read Controller Status */ +#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_TRCSTS_SHIFT 1 +#define MTL_DEBUG_TRCSTS_IDLE 0 +#define MTL_DEBUG_TRCSTS_READ 1 +#define MTL_DEBUG_TRCSTS_TXW 2 +#define MTL_DEBUG_TRCSTS_WRITE 3 +#define MTL_DEBUG_TXPAUSED BIT(0) + +/* MAC debug: GMII or MII Transmit Protocol Engine Status */ +#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4) +#define MTL_DEBUG_RXFSTS_SHIFT 4 +#define MTL_DEBUG_RXFSTS_EMPTY 0 +#define MTL_DEBUG_RXFSTS_BT 1 +#define MTL_DEBUG_RXFSTS_AT 2 +#define MTL_DEBUG_RXFSTS_FULL 3 +#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_RRCSTS_SHIFT 1 +#define MTL_DEBUG_RRCSTS_IDLE 0 +#define MTL_DEBUG_RRCSTS_RDATA 1 +#define MTL_DEBUG_RRCSTS_RSTAT 2 +#define MTL_DEBUG_RRCSTS_FLUSH 3 +#define MTL_DEBUG_RWCSTS BIT(0) + +/* MTL interrupt */ +#define MTL_RX_OVERFLOW_INT_EN BIT(24) +#define MTL_RX_OVERFLOW_INT BIT(16) + +/* Default operating mode of the MAC */ +#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ + GMAC_CONFIG_BE | GMAC_CONFIG_DCRS | \ + GMAC_CONFIG_JE) + +/* To dump the core regs excluding the Address Registers */ +#define GMAC_REG_NUM 132 + +/* MTL debug */ +#define MTL_DEBUG_TXSTSFSTS BIT(5) +#define MTL_DEBUG_TXFSTS BIT(4) +#define MTL_DEBUG_TWCSTS BIT(3) + +/* MTL debug: Tx FIFO Read Controller Status */ +#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_TRCSTS_SHIFT 1 +#define MTL_DEBUG_TRCSTS_IDLE 0 +#define MTL_DEBUG_TRCSTS_READ 1 +#define MTL_DEBUG_TRCSTS_TXW 2 +#define MTL_DEBUG_TRCSTS_WRITE 3 +#define MTL_DEBUG_TXPAUSED BIT(0) + +/* MAC debug: GMII or MII Transmit Protocol Engine Status */ +#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4) +#define MTL_DEBUG_RXFSTS_SHIFT 4 +#define MTL_DEBUG_RXFSTS_EMPTY 0 +#define MTL_DEBUG_RXFSTS_BT 1 +#define MTL_DEBUG_RXFSTS_AT 2 +#define MTL_DEBUG_RXFSTS_FULL 3 +#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_RRCSTS_SHIFT 1 +#define MTL_DEBUG_RRCSTS_IDLE 0 +#define MTL_DEBUG_RRCSTS_RDATA 1 +#define MTL_DEBUG_RRCSTS_RSTAT 2 +#define MTL_DEBUG_RRCSTS_FLUSH 3 +#define MTL_DEBUG_RWCSTS BIT(0) + +/* SGMII/RGMII status register */ +#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0) +#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1) +#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4) +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16) +#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17) +#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17 +#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19) +#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20) +#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21) +/* LNKMOD */ +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0 + +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; +#endif /* __DWMAC4_H__ */ diff --git a/devices/stmmac/dwmac4-6.1-orig.h b/devices/stmmac/dwmac4-6.1-orig.h new file mode 100644 index 00000000..12c0e608 --- /dev/null +++ b/devices/stmmac/dwmac4-6.1-orig.h @@ -0,0 +1,520 @@ +/* SPDX-License-Identifier: 
GPL-2.0-only */ +/* + * DWMAC4 Header file. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_H__ +#define __DWMAC4_H__ + +#include "common.h" + +/* MAC registers */ +#define GMAC_CONFIG 0x00000000 +#define GMAC_EXT_CONFIG 0x00000004 +#define GMAC_PACKET_FILTER 0x00000008 +#define GMAC_HASH_TAB(x) (0x10 + (x) * 4) +#define GMAC_VLAN_TAG 0x00000050 +#define GMAC_VLAN_TAG_DATA 0x00000054 +#define GMAC_VLAN_HASH_TABLE 0x00000058 +#define GMAC_RX_FLOW_CTRL 0x00000090 +#define GMAC_VLAN_INCL 0x00000060 +#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_TXQ_PRTY_MAP0 0x98 +#define GMAC_TXQ_PRTY_MAP1 0x9C +#define GMAC_RXQ_CTRL0 0x000000a0 +#define GMAC_RXQ_CTRL1 0x000000a4 +#define GMAC_RXQ_CTRL2 0x000000a8 +#define GMAC_RXQ_CTRL3 0x000000ac +#define GMAC_INT_STATUS 0x000000b0 +#define GMAC_INT_EN 0x000000b4 +#define GMAC_1US_TIC_COUNTER 0x000000dc +#define GMAC_PCS_BASE 0x000000e0 +#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 +#define GMAC_PMT 0x000000c0 +#define GMAC_DEBUG 0x00000114 +#define GMAC_HW_FEATURE0 0x0000011c +#define GMAC_HW_FEATURE1 0x00000120 +#define GMAC_HW_FEATURE2 0x00000124 +#define GMAC_HW_FEATURE3 0x00000128 +#define GMAC_MDIO_ADDR 0x00000200 +#define GMAC_MDIO_DATA 0x00000204 +#define GMAC_GPIO_STATUS 0x0000020C +#define GMAC_ARP_ADDR 0x00000210 +#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) +#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) +#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30) +#define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30) +#define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30) +#define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30) +#define GMAC_TIMESTAMP_STATUS 0x00000b20 + +/* RX Queues Routing */ +#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) +#define GMAC_RXQCTRL_AVCPQ_SHIFT 0 +#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4) +#define GMAC_RXQCTRL_PTPQ_SHIFT 4 +#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8) +#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8 +#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12) +#define GMAC_RXQCTRL_UPQ_SHIFT 12 +#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16) +#define GMAC_RXQCTRL_MCBCQ_SHIFT 16 +#define GMAC_RXQCTRL_MCBCQEN BIT(20) +#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20 +#define GMAC_RXQCTRL_TACPQE BIT(21) +#define GMAC_RXQCTRL_TACPQE_SHIFT 21 +#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24) +#define GMAC_RXQCTRL_FPRQ_SHIFT 24 + +/* MAC Packet Filtering */ +#define GMAC_PACKET_FILTER_PR BIT(0) +#define GMAC_PACKET_FILTER_HMC BIT(2) +#define GMAC_PACKET_FILTER_PM BIT(4) +#define GMAC_PACKET_FILTER_PCF BIT(7) +#define GMAC_PACKET_FILTER_HPF BIT(10) +#define GMAC_PACKET_FILTER_VTFE BIT(16) +#define GMAC_PACKET_FILTER_IPFE BIT(20) +#define GMAC_PACKET_FILTER_RA BIT(31) + +#define GMAC_MAX_PERFECT_ADDRESSES 128 + +/* MAC VLAN */ +#define GMAC_VLAN_EDVLP BIT(26) +#define GMAC_VLAN_VTHM BIT(25) +#define GMAC_VLAN_DOVLTC BIT(20) +#define GMAC_VLAN_ESVL BIT(18) +#define GMAC_VLAN_ETV BIT(16) +#define GMAC_VLAN_VID GENMASK(15, 0) +#define GMAC_VLAN_VLTI BIT(20) +#define GMAC_VLAN_CSVL BIT(19) +#define GMAC_VLAN_VLC GENMASK(17, 16) +#define GMAC_VLAN_VLC_SHIFT 16 +#define GMAC_VLAN_VLHT GENMASK(15, 0) + +/* MAC VLAN Tag */ +#define GMAC_VLAN_TAG_VID GENMASK(15, 0) +#define GMAC_VLAN_TAG_ETV BIT(16) + +/* MAC VLAN Tag Control */ +#define GMAC_VLAN_TAG_CTRL_OB BIT(0) +#define GMAC_VLAN_TAG_CTRL_CT BIT(1) +#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2) +#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2 +#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21) +#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21 +#define 
GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24) + +#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) +#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) + +/* MAC VLAN Tag Data/Filter */ +#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0) +#define GMAC_VLAN_TAG_DATA_VEN BIT(16) +#define GMAC_VLAN_TAG_DATA_ETV BIT(17) + +/* MAC RX Queue Enable */ +#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) +#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) +#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1) + +/* MAC Flow Control RX */ +#define GMAC_RX_FLOW_CTRL_RFE BIT(0) + +/* RX Queues Priorities */ +#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8) + +/* TX Queues Priorities */ +#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8) + +/* MAC Flow Control TX */ +#define GMAC_TX_FLOW_CTRL_TFE BIT(1) +#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 + +/* MAC Interrupt bitmap*/ +#define GMAC_INT_RGSMIIS BIT(0) +#define GMAC_INT_PCS_LINK BIT(1) +#define GMAC_INT_PCS_ANE BIT(2) +#define GMAC_INT_PCS_PHYIS BIT(3) +#define GMAC_INT_PMT_EN BIT(4) +#define GMAC_INT_LPI_EN BIT(5) +#define GMAC_INT_TSIE BIT(12) + +#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ + GMAC_INT_PCS_ANE) + +#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \ + GMAC_INT_TSIE) + +enum dwmac4_irq_status { + time_stamp_irq = 0x00001000, + mmc_rx_csum_offload_irq = 0x00000800, + mmc_tx_irq = 0x00000400, + mmc_rx_irq = 0x00000200, + mmc_irq = 0x00000100, + lpi_irq = 0x00000020, + pmt_irq = 0x00000010, +}; + +/* MAC PMT bitmap */ +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* Energy Efficient Ethernet (EEE) for GMAC4 + * + * LPI status, timer and control register offset + */ +#define GMAC4_LPI_CTRL_STATUS 0xd0 +#define GMAC4_LPI_TIMER_CTRL 0xd4 +#define GMAC4_LPI_ENTRY_TIMER 0xd8 +#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc + +/* LPI control and status defines */ +#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ +#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */ +#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ +#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ +#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */ + +/* MAC Debug bitmap */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +#define GMAC_DEBUG_RPESTS BIT(0) + +/* MAC config */ +#define GMAC_CONFIG_ARPEN BIT(31) +#define GMAC_CONFIG_SARC GENMASK(30, 28) +#define 
GMAC_CONFIG_SARC_SHIFT 28 +#define GMAC_CONFIG_IPC BIT(27) +#define GMAC_CONFIG_IPG GENMASK(26, 24) +#define GMAC_CONFIG_IPG_SHIFT 24 +#define GMAC_CONFIG_2K BIT(22) +#define GMAC_CONFIG_ACS BIT(20) +#define GMAC_CONFIG_BE BIT(18) +#define GMAC_CONFIG_JD BIT(17) +#define GMAC_CONFIG_JE BIT(16) +#define GMAC_CONFIG_PS BIT(15) +#define GMAC_CONFIG_FES BIT(14) +#define GMAC_CONFIG_FES_SHIFT 14 +#define GMAC_CONFIG_DM BIT(13) +#define GMAC_CONFIG_LM BIT(12) +#define GMAC_CONFIG_DCRS BIT(9) +#define GMAC_CONFIG_TE BIT(1) +#define GMAC_CONFIG_RE BIT(0) + +/* MAC extended config */ +#define GMAC_CONFIG_EIPG GENMASK(29, 25) +#define GMAC_CONFIG_EIPG_SHIFT 25 +#define GMAC_CONFIG_EIPG_EN BIT(24) +#define GMAC_CONFIG_HDSMS GENMASK(22, 20) +#define GMAC_CONFIG_HDSMS_SHIFT 20 +#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT) + +/* MAC HW features0 bitmap */ +#define GMAC_HW_FEAT_SAVLANINS BIT(27) +#define GMAC_HW_FEAT_ADDMAC BIT(18) +#define GMAC_HW_FEAT_RXCOESEL BIT(16) +#define GMAC_HW_FEAT_TXCOSEL BIT(14) +#define GMAC_HW_FEAT_EEESEL BIT(13) +#define GMAC_HW_FEAT_TSSEL BIT(12) +#define GMAC_HW_FEAT_ARPOFFSEL BIT(9) +#define GMAC_HW_FEAT_MMCSEL BIT(8) +#define GMAC_HW_FEAT_MGKSEL BIT(7) +#define GMAC_HW_FEAT_RWKSEL BIT(6) +#define GMAC_HW_FEAT_SMASEL BIT(5) +#define GMAC_HW_FEAT_VLHASH BIT(4) +#define GMAC_HW_FEAT_PCSSEL BIT(3) +#define GMAC_HW_FEAT_HDSEL BIT(2) +#define GMAC_HW_FEAT_GMIISEL BIT(1) +#define GMAC_HW_FEAT_MIISEL BIT(0) + +/* MAC HW features1 bitmap */ +#define GMAC_HW_FEAT_L3L4FNUM GENMASK(30, 27) +#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24) +#define GMAC_HW_FEAT_AVSEL BIT(20) +#define GMAC_HW_TSOEN BIT(18) +#define GMAC_HW_FEAT_SPHEN BIT(17) +#define GMAC_HW_ADDR64 GENMASK(15, 14) +#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6) +#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) + +/* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28) +#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) +#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) +#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) +#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) +#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0) + +/* MAC HW features3 bitmap */ +#define GMAC_HW_FEAT_ASP GENMASK(29, 28) +#define GMAC_HW_FEAT_TBSSEL BIT(27) +#define GMAC_HW_FEAT_FPESEL BIT(26) +#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20) +#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17) +#define GMAC_HW_FEAT_ESTSEL BIT(16) +#define GMAC_HW_FEAT_FRPES GENMASK(14, 13) +#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11) +#define GMAC_HW_FEAT_FRPSEL BIT(10) +#define GMAC_HW_FEAT_DVLAN BIT(5) +#define GMAC_HW_FEAT_NRVF GENMASK(2, 0) + +/* GMAC GPIO Status reg */ +#define GMAC_GPO0 BIT(16) +#define GMAC_GPO1 BIT(17) +#define GMAC_GPO2 BIT(18) +#define GMAC_GPO3 BIT(19) + +/* MAC HW ADDR regs */ +#define GMAC_HI_DCS GENMASK(18, 16) +#define GMAC_HI_DCS_SHIFT 16 +#define GMAC_HI_REG_AE BIT(31) + +/* L3/L4 Filters regs */ +#define GMAC_L4DPIM0 BIT(21) +#define GMAC_L4DPM0 BIT(20) +#define GMAC_L4SPIM0 BIT(19) +#define GMAC_L4SPM0 BIT(18) +#define GMAC_L4PEN0 BIT(16) +#define GMAC_L3DAIM0 BIT(5) +#define GMAC_L3DAM0 BIT(4) +#define GMAC_L3SAIM0 BIT(3) +#define GMAC_L3SAM0 BIT(2) +#define GMAC_L3PEN0 BIT(0) +#define GMAC_L4DP0 GENMASK(31, 16) +#define GMAC_L4DP0_SHIFT 16 +#define GMAC_L4SP0 GENMASK(15, 0) + +/* MAC Timestamp Status */ +#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2) +#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define GMAC_TIMESTAMP_ATSNS_SHIFT 25 + +/* MTL registers */ +#define MTL_OPERATION_MODE 0x00000c00 +#define MTL_FRPE BIT(15) +#define 
MTL_OPERATION_SCHALG_MASK GENMASK(6, 5) +#define MTL_OPERATION_SCHALG_WRR (0x0 << 5) +#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5) +#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5) +#define MTL_OPERATION_SCHALG_SP (0x3 << 5) +#define MTL_OPERATION_RAA BIT(2) +#define MTL_OPERATION_RAA_SP (0x0 << 2) +#define MTL_OPERATION_RAA_WSP (0x1 << 2) + +#define MTL_INT_STATUS 0x00000c20 +#define MTL_INT_QX(x) BIT(x) + +#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */ +#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */ +#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0) +#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0) +#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x)) +#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q))) + +#define MTL_CHAN_BASE_ADDR 0x00000d00 +#define MTL_CHAN_BASE_OFFSET 0x40 +#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \ + (x * MTL_CHAN_BASE_OFFSET)) + +#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x) +#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8) +#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c) +#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30) +#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38) + +#define MTL_OP_MODE_RSF BIT(5) +#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2) +#define MTL_OP_MODE_TXQEN_AV BIT(2) +#define MTL_OP_MODE_TXQEN BIT(3) +#define MTL_OP_MODE_TSF BIT(1) + +#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16) +#define MTL_OP_MODE_TQS_SHIFT 16 + +#define MTL_OP_MODE_TTC_MASK 0x70 +#define MTL_OP_MODE_TTC_SHIFT 4 + +#define MTL_OP_MODE_TTC_32 0 +#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT) + +#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20) +#define MTL_OP_MODE_RQS_SHIFT 20 + +#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14) +#define MTL_OP_MODE_RFD_SHIFT 14 + +#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8) +#define MTL_OP_MODE_RFA_SHIFT 8 + +#define MTL_OP_MODE_EHFC BIT(7) + +#define MTL_OP_MODE_RTC_MASK 0x18 +#define MTL_OP_MODE_RTC_SHIFT 3 + +#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT) +#define MTL_OP_MODE_RTC_64 0 +#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT) +#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT) + +/* MTL ETS Control register */ +#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10 +#define MTL_ETS_CTRL_BASE_OFFSET 0x40 +#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \ + ((x) * MTL_ETS_CTRL_BASE_OFFSET)) + +#define MTL_ETS_CTRL_CC BIT(3) +#define MTL_ETS_CTRL_AVALG BIT(2) + +/* MTL Queue Quantum Weight */ +#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18 +#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40 +#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \ + ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET)) +#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0) + +/* MTL sendSlopeCredit register */ +#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c +#define MTL_SEND_SLP_CRED_OFFSET 0x40 +#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \ + ((x) * MTL_SEND_SLP_CRED_OFFSET)) + +#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0) + +/* MTL hiCredit register */ +#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20 +#define MTL_HIGH_CRED_OFFSET 0x40 +#define MTL_HIGH_CREDX_BASE_ADDR(x) 
(MTL_HIGH_CRED_BASE_ADDR + \ + ((x) * MTL_HIGH_CRED_OFFSET)) + +#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0) + +/* MTL loCredit register */ +#define MTL_LOW_CRED_BASE_ADDR 0x00000d24 +#define MTL_LOW_CRED_OFFSET 0x40 +#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \ + ((x) * MTL_LOW_CRED_OFFSET)) + +#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0) + +/* MTL debug */ +#define MTL_DEBUG_TXSTSFSTS BIT(5) +#define MTL_DEBUG_TXFSTS BIT(4) +#define MTL_DEBUG_TWCSTS BIT(3) + +/* MTL debug: Tx FIFO Read Controller Status */ +#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_TRCSTS_SHIFT 1 +#define MTL_DEBUG_TRCSTS_IDLE 0 +#define MTL_DEBUG_TRCSTS_READ 1 +#define MTL_DEBUG_TRCSTS_TXW 2 +#define MTL_DEBUG_TRCSTS_WRITE 3 +#define MTL_DEBUG_TXPAUSED BIT(0) + +/* MAC debug: GMII or MII Transmit Protocol Engine Status */ +#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4) +#define MTL_DEBUG_RXFSTS_SHIFT 4 +#define MTL_DEBUG_RXFSTS_EMPTY 0 +#define MTL_DEBUG_RXFSTS_BT 1 +#define MTL_DEBUG_RXFSTS_AT 2 +#define MTL_DEBUG_RXFSTS_FULL 3 +#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_RRCSTS_SHIFT 1 +#define MTL_DEBUG_RRCSTS_IDLE 0 +#define MTL_DEBUG_RRCSTS_RDATA 1 +#define MTL_DEBUG_RRCSTS_RSTAT 2 +#define MTL_DEBUG_RRCSTS_FLUSH 3 +#define MTL_DEBUG_RWCSTS BIT(0) + +/* MTL interrupt */ +#define MTL_RX_OVERFLOW_INT_EN BIT(24) +#define MTL_RX_OVERFLOW_INT BIT(16) + +/* Default operating mode of the MAC */ +#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ + GMAC_CONFIG_BE | GMAC_CONFIG_DCRS | \ + GMAC_CONFIG_JE) + +/* To dump the core regs excluding the Address Registers */ +#define GMAC_REG_NUM 132 + +/* MTL debug */ +#define MTL_DEBUG_TXSTSFSTS BIT(5) +#define MTL_DEBUG_TXFSTS BIT(4) +#define MTL_DEBUG_TWCSTS BIT(3) + +/* MTL debug: Tx FIFO Read Controller Status */ +#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_TRCSTS_SHIFT 1 +#define MTL_DEBUG_TRCSTS_IDLE 0 +#define MTL_DEBUG_TRCSTS_READ 1 +#define MTL_DEBUG_TRCSTS_TXW 2 +#define MTL_DEBUG_TRCSTS_WRITE 3 +#define MTL_DEBUG_TXPAUSED BIT(0) + +/* MAC debug: GMII or MII Transmit Protocol Engine Status */ +#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4) +#define MTL_DEBUG_RXFSTS_SHIFT 4 +#define MTL_DEBUG_RXFSTS_EMPTY 0 +#define MTL_DEBUG_RXFSTS_BT 1 +#define MTL_DEBUG_RXFSTS_AT 2 +#define MTL_DEBUG_RXFSTS_FULL 3 +#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_RRCSTS_SHIFT 1 +#define MTL_DEBUG_RRCSTS_IDLE 0 +#define MTL_DEBUG_RRCSTS_RDATA 1 +#define MTL_DEBUG_RRCSTS_RSTAT 2 +#define MTL_DEBUG_RRCSTS_FLUSH 3 +#define MTL_DEBUG_RWCSTS BIT(0) + +/* SGMII/RGMII status register */ +#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0) +#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1) +#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4) +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16) +#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17) +#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17 +#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19) +#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20) +#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21) +/* LNKMOD */ +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0 + +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; +#endif /* __DWMAC4_H__ */ diff --git a/devices/stmmac/dwmac4_core-6.1-ethercat.c b/devices/stmmac/dwmac4_core-6.1-ethercat.c new file mode 100644 index 00000000..fd109bd7 --- /dev/null +++ 
b/devices/stmmac/dwmac4_core-6.1-ethercat.c @@ -0,0 +1,1334 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + * DWC Ether MAC version 4.00 has been used for developing this code. + * + * This only implements the mac core functions for this chip. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include +#include +#include +#include "stmmac-6.1-ethercat.h" +#include "stmmac_pcs-6.1-ethercat.h" +#include "dwmac4-6.1-ethercat.h" +#include "dwmac5-6.1-ethercat.h" + +static void dwmac4_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + u32 clk_rate; + + value |= GMAC_CORE_INIT; + + if (hw->ps) { + value |= GMAC_CONFIG_TE; + + value &= hw->link.speed_mask; + switch (hw->ps) { + case SPEED_1000: + value |= hw->link.speed1000; + break; + case SPEED_100: + value |= hw->link.speed100; + break; + case SPEED_10: + value |= hw->link.speed10; + break; + } + } + + writel(value, ioaddr + GMAC_CONFIG); + + /* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */ + clk_rate = clk_get_rate(priv->plat->stmmac_clk); + writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER); + + /* Enable GMAC interrupts */ + value = GMAC_INT_DEFAULT_ENABLE; + + if (hw->pcs) + value |= GMAC_PCS_IRQ_DEFAULT; + + /* Enable FPE interrupt */ + if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26) + value |= GMAC_INT_FPE_EN; + + writel(value, ioaddr + GMAC_INT_EN); + + if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE) + init_waitqueue_head(&priv->tstamp_busy_wait); +} + +static void dwmac4_rx_queue_enable(struct mac_device_info *hw, + u8 mode, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); + + value &= GMAC_RX_QUEUE_CLEAR(queue); + if (mode == MTL_QUEUE_AVB) + value |= GMAC_RX_AV_QUEUE_ENABLE(queue); + else if (mode == MTL_QUEUE_DCB) + value |= GMAC_RX_DCB_QUEUE_ENABLE(queue); + + writel(value, ioaddr + GMAC_RXQ_CTRL0); +} + +static void dwmac4_rx_queue_priority(struct mac_device_info *hw, + u32 prio, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 base_register; + u32 value; + + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + base_register); + + value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue); + value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & + GMAC_RXQCTRL_PSRQX_MASK(queue); + writel(value, ioaddr + base_register); +} + +static void dwmac4_tx_queue_priority(struct mac_device_info *hw, + u32 prio, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 base_register; + u32 value; + + base_register = (queue < 4) ? 
GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + base_register); + + value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue); + value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) & + GMAC_TXQCTRL_PSTQX_MASK(queue); + + writel(value, ioaddr + base_register); +} + +static void dwmac4_rx_queue_routing(struct mac_device_info *hw, + u8 packet, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + static const struct stmmac_rx_routing route_possibilities[] = { + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, + { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT }, + { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT }, + }; + + value = readl(ioaddr + GMAC_RXQ_CTRL1); + + /* routing configuration */ + value &= ~route_possibilities[packet - 1].reg_mask; + value |= (queue << route_possibilities[packet-1].reg_shift) & + route_possibilities[packet - 1].reg_mask; + + /* some packets require extra ops */ + if (packet == PACKET_AVCPQ) { + value &= ~GMAC_RXQCTRL_TACPQE; + value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT; + } else if (packet == PACKET_MCBCQ) { + value &= ~GMAC_RXQCTRL_MCBCQEN; + value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT; + } + + writel(value, ioaddr + GMAC_RXQ_CTRL1); +} + +static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw, + u32 rx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_OPERATION_MODE); + + value &= ~MTL_OPERATION_RAA; + switch (rx_alg) { + case MTL_RX_ALGORITHM_SP: + value |= MTL_OPERATION_RAA_SP; + break; + case MTL_RX_ALGORITHM_WSP: + value |= MTL_OPERATION_RAA_WSP; + break; + default: + break; + } + + writel(value, ioaddr + MTL_OPERATION_MODE); +} + +static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw, + u32 tx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_OPERATION_MODE); + + value &= ~MTL_OPERATION_SCHALG_MASK; + switch (tx_alg) { + case MTL_TX_ALGORITHM_WRR: + value |= MTL_OPERATION_SCHALG_WRR; + break; + case MTL_TX_ALGORITHM_WFQ: + value |= MTL_OPERATION_SCHALG_WFQ; + break; + case MTL_TX_ALGORITHM_DWRR: + value |= MTL_OPERATION_SCHALG_DWRR; + break; + case MTL_TX_ALGORITHM_SP: + value |= MTL_OPERATION_SCHALG_SP; + break; + default: + break; + } + + writel(value, ioaddr + MTL_OPERATION_MODE); +} + +static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw, + u32 weight, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); + + value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK; + value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK; + writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); +} + +static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + if (queue < 4) + value = readl(ioaddr + MTL_RXQ_DMA_MAP0); + else + value = readl(ioaddr + MTL_RXQ_DMA_MAP1); + + if (queue == 0 || queue == 4) { + value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; + value |= MTL_RXQ_DMA_Q04MDMACH(chan); + } else if (queue > 4) { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4); + } else { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); + } + + if (queue < 4) + writel(value, ioaddr + MTL_RXQ_DMA_MAP0); + else + writel(value, ioaddr + MTL_RXQ_DMA_MAP1); +} + +static void dwmac4_config_cbs(struct mac_device_info *hw, + u32 send_slope, 
u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + pr_debug("Queue %d configured as AVB. Parameters:\n", queue); + pr_debug("\tsend_slope: 0x%08x\n", send_slope); + pr_debug("\tidle_slope: 0x%08x\n", idle_slope); + pr_debug("\thigh_credit: 0x%08x\n", high_credit); + pr_debug("\tlow_credit: 0x%08x\n", low_credit); + + /* enable AV algorithm */ + value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); + value |= MTL_ETS_CTRL_AVALG; + value |= MTL_ETS_CTRL_CC; + writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); + + /* configure send slope */ + value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); + value &= ~MTL_SEND_SLP_CRED_SSC_MASK; + value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK; + writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); + + /* configure idle slope (same register as tx weight) */ + dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue); + + /* configure high credit */ + value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); + value &= ~MTL_HIGH_CRED_HC_MASK; + value |= high_credit & MTL_HIGH_CRED_HC_MASK; + writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); + + /* configure high credit */ + value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); + value &= ~MTL_HIGH_CRED_LC_MASK; + value |= low_credit & MTL_HIGH_CRED_LC_MASK; + writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); +} + +static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < GMAC_REG_NUM; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + + if (hw->rx_csum) + value |= GMAC_CONFIG_IPC; + else + value &= ~GMAC_CONFIG_IPC; + + writel(value, ioaddr + GMAC_CONFIG); + + value = readl(ioaddr + GMAC_CONFIG); + + return !!(value & GMAC_CONFIG_IPC); +} + +static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int pmt = 0; + u32 config; + + if (mode & WAKE_MAGIC) { + pr_debug("GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } + if (mode & WAKE_UCAST) { + pr_debug("GMAC: WOL on global unicast\n"); + pmt |= power_down | global_unicast | wake_up_frame_en; + } + + if (pmt) { + /* The receiver must be enabled for WOL before powering down */ + config = readl(ioaddr + GMAC_CONFIG); + config |= GMAC_CONFIG_RE; + writel(config, ioaddr + GMAC_CONFIG); + } + writel(pmt, ioaddr + GMAC_PMT); +} + +static void dwmac4_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. 
+ */ + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + if (en_tx_lpi_clockgating) + value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (link) + value |= GMAC4_LPI_CTRL_STATUS_PLS; + else + value &= ~GMAC4_LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et) +{ + void __iomem *ioaddr = hw->pcsr; + int value = et & STMMAC_ET_MAX; + int regval; + + /* Program LPI entry timer value into register */ + writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER); + + /* Enable/disable LPI entry timer */ + regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + if (et) + regval |= GMAC4_LPI_CTRL_STATUS_LPIATE; + else + regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE; + + writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. 
+ */ + writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); +} + +static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 val; + + val = readl(ioaddr + GMAC_VLAN_TAG); + val &= ~GMAC_VLAN_TAG_VID; + val |= GMAC_VLAN_TAG_ETV | vid; + + writel(val, ioaddr + GMAC_VLAN_TAG); +} + +static int dwmac4_write_vlan_filter(struct net_device *dev, + struct mac_device_info *hw, + u8 index, u32 data) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + int i, timeout = 10; + u32 val; + + if (index >= hw->num_vlan) + return -EINVAL; + + writel(data, ioaddr + GMAC_VLAN_TAG_DATA); + + val = readl(ioaddr + GMAC_VLAN_TAG); + val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK | + GMAC_VLAN_TAG_CTRL_CT | + GMAC_VLAN_TAG_CTRL_OB); + val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB; + + writel(val, ioaddr + GMAC_VLAN_TAG); + + for (i = 0; i < timeout; i++) { + val = readl(ioaddr + GMAC_VLAN_TAG); + if (!(val & GMAC_VLAN_TAG_CTRL_OB)) + return 0; + udelay(1); + } + + netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n"); + + return -EBUSY; +} + +static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int index = -1; + u32 val = 0; + int i, ret; + + if (vid > 4095) + return -EINVAL; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + /* For single VLAN filter, VID 0 means VLAN promiscuous */ + if (vid == 0) { + netdev_warn(dev, "Adding VLAN ID 0 is not supported\n"); + return -EPERM; + } + + if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) { + netdev_err(dev, "Only single VLAN ID supported\n"); + return -EPERM; + } + + hw->vlan_filter[0] = vid; + dwmac4_write_single_vlan(dev, vid); + + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid; + + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] == val) + return 0; + else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN)) + index = i; + } + + if (index == -1) { + netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n", + hw->num_vlan); + return -EPERM; + } + + ret = dwmac4_write_vlan_filter(dev, hw, index, val); + + if (!ret) + hw->vlan_filter[index] = val; + + return ret; +} + +static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int i, ret = 0; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) { + hw->vlan_filter[0] = 0; + dwmac4_write_single_vlan(dev, 0); + } + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) { + ret = dwmac4_write_vlan_filter(dev, hw, i, 0); + + if (!ret) + hw->vlan_filter[i] = 0; + else + return ret; + } + } + + return ret; +} + +static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + u32 hash; + u32 val; + int i; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + dwmac4_write_single_vlan(dev, hw->vlan_filter[0]); + return; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) { + val = hw->vlan_filter[i]; + dwmac4_write_vlan_filter(dev, hw, i, val); + } + } + + hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE); + if (hash & GMAC_VLAN_VLHT) { + value = readl(ioaddr + GMAC_VLAN_TAG); + value |= 
GMAC_VLAN_VTHM; + writel(value, ioaddr + GMAC_VLAN_TAG); + } +} + +static void dwmac4_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + int numhashregs = (hw->multicast_filter_bins >> 5); + int mcbitslog2 = hw->mcast_bits_log2; + unsigned int value; + u32 mc_filter[8]; + int i; + + memset(mc_filter, 0, sizeof(mc_filter)); + + value = readl(ioaddr + GMAC_PACKET_FILTER); + value &= ~GMAC_PACKET_FILTER_HMC; + value &= ~GMAC_PACKET_FILTER_HPF; + value &= ~GMAC_PACKET_FILTER_PCF; + value &= ~GMAC_PACKET_FILTER_PM; + value &= ~GMAC_PACKET_FILTER_PR; + value &= ~GMAC_PACKET_FILTER_RA; + if (dev->flags & IFF_PROMISC) { + /* VLAN Tag Filter Fail Packets Queuing */ + if (hw->vlan_fail_q_en) { + value = readl(ioaddr + GMAC_RXQ_CTRL4); + value &= ~GMAC_RXQCTRL_VFFQ_MASK; + value |= GMAC_RXQCTRL_VFFQE | + (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT); + writel(value, ioaddr + GMAC_RXQ_CTRL4); + value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA; + } else { + value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF; + } + + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > hw->multicast_filter_bins)) { + /* Pass all multi */ + value |= GMAC_PACKET_FILTER_PM; + /* Set all the bits of the HASH tab */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ + value |= GMAC_PACKET_FILTER_HMC; + + netdev_for_each_mc_addr(ha, dev) { + /* The upper n bits of the calculated CRC are used to + * index the contents of the hash table. The number of + * bits used depends on the hardware configuration + * selected at core configuration time. + */ + u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr, + ETH_ALEN)) >> (32 - mcbitslog2); + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. 
+ */ + mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f)); + } + } + + for (i = 0; i < numhashregs; i++) + writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i)); + + value |= GMAC_PACKET_FILTER_HPF; + + /* Handle multiple unicast addresses */ + if (netdev_uc_count(dev) > hw->unicast_filter_entries) { + /* Switch to promiscuous mode if more than 128 addrs + * are required + */ + value |= GMAC_PACKET_FILTER_PR; + } else { + struct netdev_hw_addr *ha; + int reg = 1; + + netdev_for_each_uc_addr(ha, dev) { + dwmac4_set_umac_addr(hw, ha->addr, reg); + reg++; + } + + while (reg < GMAC_MAX_PERFECT_ADDRESSES) { + writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); + writel(0, ioaddr + GMAC_ADDR_LOW(reg)); + reg++; + } + } + + /* VLAN filtering */ + if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) + value &= ~GMAC_PACKET_FILTER_VTFE; + else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + value |= GMAC_PACKET_FILTER_VTFE; + + writel(value, ioaddr + GMAC_PACKET_FILTER); +} + +static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int flow = 0; + u32 queue = 0; + + pr_debug("GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + pr_debug("\tReceive Flow-Control ON\n"); + flow |= GMAC_RX_FLOW_CTRL_RFE; + } else { + pr_debug("\tReceive Flow-Control OFF\n"); + } + writel(flow, ioaddr + GMAC_RX_FLOW_CTRL); + + if (fc & FLOW_TX) { + pr_debug("\tTransmit Flow-Control ON\n"); + + if (duplex) + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); + + for (queue = 0; queue < tx_cnt; queue++) { + flow = GMAC_TX_FLOW_CTRL_TFE; + + if (duplex) + flow |= + (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); + + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); + } + } else { + for (queue = 0; queue < tx_cnt; queue++) + writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); + } +} + +static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) +{ + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); +} + +static void dwmac4_rane(void __iomem *ioaddr, bool restart) +{ + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} + +static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); +} + +/* RGMII or SMII interface */ +static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 status; + + status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS); + x->irq_rgmii_n++; + + /* Check the link status */ + if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) { + int speed_value; + + x->pcs_link = 1; + + speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >> + GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT); + if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK); + + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? 
"Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } +} + +static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 mtl_int_qx_status; + int ret = 0; + + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); + + /* Check MTL Interrupt */ + if (mtl_int_qx_status & MTL_INT_QX(chan)) { + /* read Queue x Interrupt status */ + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan)); + + if (status & MTL_RX_OVERFLOW_INT) { + /* clear Interrupt */ + writel(status | MTL_RX_OVERFLOW_INT, + ioaddr + MTL_CHAN_INT_CTRL(chan)); + ret = CORE_IRQ_MTL_RX_OVERFLOW; + } + } + + return ret; +} + +static int dwmac4_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + u32 intr_enable = readl(ioaddr + GMAC_INT_EN); + int ret = 0; + + /* Discard disabled bits */ + intr_status &= intr_enable; + + /* Not used events (e.g. MMC interrupts) are not handled. */ + if ((intr_status & mmc_tx_irq)) + x->mmc_tx_irq_n++; + if (unlikely(intr_status & mmc_rx_irq)) + x->mmc_rx_irq_n++; + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + x->mmc_rx_csum_offload_irq_n++; + /* Clear the PMT bits 5 and 6 by reading the PMT status reg */ + if (unlikely(intr_status & pmt_irq)) { + readl(ioaddr + GMAC_PMT); + x->irq_receive_pmt_irq_n++; + } + + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & lpi_irq) { + /* Clear LPI interrupt by reading MAC_LPI_Control_Status */ + u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) { + ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; + x->irq_tx_path_in_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) { + ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; + x->irq_tx_path_exit_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac4_phystatus(ioaddr, x); + + return ret; +} + +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues) +{ + u32 value; + u32 queue; + + for (queue = 0; queue < tx_queues; queue++) { + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue)); + + if (value & MTL_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & MTL_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & MTL_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & MTL_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) + >> MTL_DEBUG_TRCSTS_SHIFT; + if (trcsts == MTL_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == MTL_DEBUG_TRCSTS_TXW) + x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == MTL_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; + } + + for (queue = 0; queue < rx_queues; queue++) { + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue)); + + if (value & MTL_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) + >> MTL_DEBUG_RRCSTS_SHIFT; + + if (rxfsts == MTL_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == MTL_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == MTL_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + 
x->mtl_rx_fifo_fill_level_empty++; + } + if (value & MTL_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> + MTL_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; + } + + /* GMAC debug */ + value = readl(ioaddr + GMAC_DEBUG); + + if (value & GMAC_DEBUG_TFCSTS_MASK) { + u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) + >> GMAC_DEBUG_TFCSTS_SHIFT; + + if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) + x->mac_tx_frame_ctrl_xfer++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) + x->mac_tx_frame_ctrl_pause++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) + x->mac_tx_frame_ctrl_wait++; + else + x->mac_tx_frame_ctrl_idle++; + } + if (value & GMAC_DEBUG_TPESTS) + x->mac_gmii_tx_proto_engine++; + if (value & GMAC_DEBUG_RFCFCSTS_MASK) + x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) + >> GMAC_DEBUG_RFCFCSTS_SHIFT; + if (value & GMAC_DEBUG_RPESTS) + x->mac_gmii_rx_proto_engine++; +} + +static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + + if (enable) + value |= GMAC_CONFIG_LM; + else + value &= ~GMAC_CONFIG_LM; + + writel(value, ioaddr + GMAC_CONFIG); +} + +static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, + __le16 perfect_match, bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE); + + value = readl(ioaddr + GMAC_VLAN_TAG); + + if (hash) { + value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV; + if (is_double) { + value |= GMAC_VLAN_EDVLP; + value |= GMAC_VLAN_ESVL; + value |= GMAC_VLAN_DOVLTC; + } + + writel(value, ioaddr + GMAC_VLAN_TAG); + } else if (perfect_match) { + u32 value = GMAC_VLAN_ETV; + + if (is_double) { + value |= GMAC_VLAN_EDVLP; + value |= GMAC_VLAN_ESVL; + value |= GMAC_VLAN_DOVLTC; + } + + writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG); + } else { + value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV); + value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); + value &= ~GMAC_VLAN_DOVLTC; + value &= ~GMAC_VLAN_VID; + + writel(value, ioaddr + GMAC_VLAN_TAG); + } +} + +static void dwmac4_sarc_configure(void __iomem *ioaddr, int val) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + + value &= ~GMAC_CONFIG_SARC; + value |= val << GMAC_CONFIG_SARC_SHIFT; + + writel(value, ioaddr + GMAC_CONFIG); +} + +static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC_VLAN_INCL); + value |= GMAC_VLAN_VLTI; + value |= GMAC_VLAN_CSVL; /* Only use SVLAN */ + value &= ~GMAC_VLAN_VLC; + value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC; + writel(value, ioaddr + GMAC_VLAN_INCL); +} + +static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en, + u32 addr) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(addr, ioaddr + GMAC_ARP_ADDR); + + value = readl(ioaddr + GMAC_CONFIG); + if (en) + value |= GMAC_CONFIG_ARPEN; + else + value &= ~GMAC_CONFIG_ARPEN; + writel(value, ioaddr + GMAC_CONFIG); +} + +static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool ipv6, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + 
GMAC_PACKET_FILTER); + value |= GMAC_PACKET_FILTER_IPFE; + writel(value, ioaddr + GMAC_PACKET_FILTER); + + value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no)); + + /* For IPv6 not both SA/DA filters can be active */ + if (ipv6) { + value |= GMAC_L3PEN0; + value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0); + value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0); + if (sa) { + value |= GMAC_L3SAM0; + if (inv) + value |= GMAC_L3SAIM0; + } else { + value |= GMAC_L3DAM0; + if (inv) + value |= GMAC_L3DAIM0; + } + } else { + value &= ~GMAC_L3PEN0; + if (sa) { + value |= GMAC_L3SAM0; + if (inv) + value |= GMAC_L3SAIM0; + } else { + value |= GMAC_L3DAM0; + if (inv) + value |= GMAC_L3DAIM0; + } + } + + writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + if (sa) { + writel(match, ioaddr + GMAC_L3_ADDR0(filter_no)); + } else { + writel(match, ioaddr + GMAC_L3_ADDR1(filter_no)); + } + + if (!en) + writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + return 0; +} + +static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool udp, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC_PACKET_FILTER); + value |= GMAC_PACKET_FILTER_IPFE; + writel(value, ioaddr + GMAC_PACKET_FILTER); + + value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no)); + if (udp) { + value |= GMAC_L4PEN0; + } else { + value &= ~GMAC_L4PEN0; + } + + value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0); + value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0); + if (sa) { + value |= GMAC_L4SPM0; + if (inv) + value |= GMAC_L4SPIM0; + } else { + value |= GMAC_L4DPM0; + if (inv) + value |= GMAC_L4DPIM0; + } + + writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + if (sa) { + value = match & GMAC_L4SP0; + } else { + value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0; + } + + writel(value, ioaddr + GMAC_L4_ADDR(filter_no)); + + if (!en) + writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + return 0; +} + +const struct stmmac_ops dwmac4_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_rx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, + .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash = dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, + .config_l3_filter = dwmac4_config_l3_filter, + .config_l4_filter = dwmac4_config_l4_filter, + .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, + 
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, +}; + +const struct stmmac_ops dwmac410_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_rx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, + .flex_pps_config = dwmac5_flex_pps_config, + .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash = dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, + .config_l3_filter = dwmac4_config_l3_filter, + .config_l4_filter = dwmac4_config_l4_filter, + .est_configure = dwmac5_est_configure, + .est_irq_status = dwmac5_est_irq_status, + .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, + .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, + .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, +}; + +const struct stmmac_ops dwmac510_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_rx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, + .safety_feat_config = dwmac5_safety_feat_config, + .safety_feat_irq_status = dwmac5_safety_feat_irq_status, + .safety_feat_dump = dwmac5_safety_feat_dump, + .rxp_config = dwmac5_rxp_config, + .flex_pps_config = dwmac5_flex_pps_config, + .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash 
= dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, + .config_l3_filter = dwmac4_config_l3_filter, + .config_l4_filter = dwmac4_config_l4_filter, + .est_configure = dwmac5_est_configure, + .est_irq_status = dwmac5_est_irq_status, + .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, + .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, + .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, +}; + +static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) +{ + u32 val, num_vlan; + + val = readl(ioaddr + GMAC_HW_FEATURE3); + switch (val & GMAC_HW_FEAT_NRVF) { + case 0: + num_vlan = 1; + break; + case 1: + num_vlan = 4; + break; + case 2: + num_vlan = 8; + break; + case 3: + num_vlan = 16; + break; + case 4: + num_vlan = 24; + break; + case 5: + num_vlan = 32; + break; + default: + num_vlan = 1; + } + + return num_vlan; +} + +int dwmac4_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tDWMAC4/5\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = GMAC_CONFIG_DM; + mac->link.speed10 = GMAC_CONFIG_PS; + mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS; + mac->link.speed1000 = 0; + mac->link.speed2500 = GMAC_CONFIG_FES; + mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS; + mac->mii.addr = GMAC_MDIO_ADDR; + mac->mii.data = GMAC_MDIO_DATA; + mac->mii.addr_shift = 21; + mac->mii.addr_mask = GENMASK(25, 21); + mac->mii.reg_shift = 16; + mac->mii.reg_mask = GENMASK(20, 16); + mac->mii.clk_csr_shift = 8; + mac->mii.clk_csr_mask = GENMASK(11, 8); + mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr); + + return 0; +} diff --git a/devices/stmmac/dwmac4_core-6.1-orig.c b/devices/stmmac/dwmac4_core-6.1-orig.c new file mode 100644 index 00000000..84276eb6 --- /dev/null +++ b/devices/stmmac/dwmac4_core-6.1-orig.c @@ -0,0 +1,1334 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + * DWC Ether MAC version 4.00 has been used for developing this code. + * + * This only implements the mac core functions for this chip. 
+ * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include +#include +#include +#include "stmmac.h" +#include "stmmac_pcs.h" +#include "dwmac4.h" +#include "dwmac5.h" + +static void dwmac4_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + u32 clk_rate; + + value |= GMAC_CORE_INIT; + + if (hw->ps) { + value |= GMAC_CONFIG_TE; + + value &= hw->link.speed_mask; + switch (hw->ps) { + case SPEED_1000: + value |= hw->link.speed1000; + break; + case SPEED_100: + value |= hw->link.speed100; + break; + case SPEED_10: + value |= hw->link.speed10; + break; + } + } + + writel(value, ioaddr + GMAC_CONFIG); + + /* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */ + clk_rate = clk_get_rate(priv->plat->stmmac_clk); + writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER); + + /* Enable GMAC interrupts */ + value = GMAC_INT_DEFAULT_ENABLE; + + if (hw->pcs) + value |= GMAC_PCS_IRQ_DEFAULT; + + /* Enable FPE interrupt */ + if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26) + value |= GMAC_INT_FPE_EN; + + writel(value, ioaddr + GMAC_INT_EN); + + if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE) + init_waitqueue_head(&priv->tstamp_busy_wait); +} + +static void dwmac4_rx_queue_enable(struct mac_device_info *hw, + u8 mode, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); + + value &= GMAC_RX_QUEUE_CLEAR(queue); + if (mode == MTL_QUEUE_AVB) + value |= GMAC_RX_AV_QUEUE_ENABLE(queue); + else if (mode == MTL_QUEUE_DCB) + value |= GMAC_RX_DCB_QUEUE_ENABLE(queue); + + writel(value, ioaddr + GMAC_RXQ_CTRL0); +} + +static void dwmac4_rx_queue_priority(struct mac_device_info *hw, + u32 prio, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 base_register; + u32 value; + + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + base_register); + + value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue); + value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & + GMAC_RXQCTRL_PSRQX_MASK(queue); + writel(value, ioaddr + base_register); +} + +static void dwmac4_tx_queue_priority(struct mac_device_info *hw, + u32 prio, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 base_register; + u32 value; + + base_register = (queue < 4) ? 
GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + base_register); + + value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue); + value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) & + GMAC_TXQCTRL_PSTQX_MASK(queue); + + writel(value, ioaddr + base_register); +} + +static void dwmac4_rx_queue_routing(struct mac_device_info *hw, + u8 packet, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + static const struct stmmac_rx_routing route_possibilities[] = { + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, + { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT }, + { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT }, + }; + + value = readl(ioaddr + GMAC_RXQ_CTRL1); + + /* routing configuration */ + value &= ~route_possibilities[packet - 1].reg_mask; + value |= (queue << route_possibilities[packet-1].reg_shift) & + route_possibilities[packet - 1].reg_mask; + + /* some packets require extra ops */ + if (packet == PACKET_AVCPQ) { + value &= ~GMAC_RXQCTRL_TACPQE; + value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT; + } else if (packet == PACKET_MCBCQ) { + value &= ~GMAC_RXQCTRL_MCBCQEN; + value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT; + } + + writel(value, ioaddr + GMAC_RXQ_CTRL1); +} + +static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw, + u32 rx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_OPERATION_MODE); + + value &= ~MTL_OPERATION_RAA; + switch (rx_alg) { + case MTL_RX_ALGORITHM_SP: + value |= MTL_OPERATION_RAA_SP; + break; + case MTL_RX_ALGORITHM_WSP: + value |= MTL_OPERATION_RAA_WSP; + break; + default: + break; + } + + writel(value, ioaddr + MTL_OPERATION_MODE); +} + +static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw, + u32 tx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_OPERATION_MODE); + + value &= ~MTL_OPERATION_SCHALG_MASK; + switch (tx_alg) { + case MTL_TX_ALGORITHM_WRR: + value |= MTL_OPERATION_SCHALG_WRR; + break; + case MTL_TX_ALGORITHM_WFQ: + value |= MTL_OPERATION_SCHALG_WFQ; + break; + case MTL_TX_ALGORITHM_DWRR: + value |= MTL_OPERATION_SCHALG_DWRR; + break; + case MTL_TX_ALGORITHM_SP: + value |= MTL_OPERATION_SCHALG_SP; + break; + default: + break; + } + + writel(value, ioaddr + MTL_OPERATION_MODE); +} + +static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw, + u32 weight, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); + + value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK; + value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK; + writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); +} + +static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + if (queue < 4) + value = readl(ioaddr + MTL_RXQ_DMA_MAP0); + else + value = readl(ioaddr + MTL_RXQ_DMA_MAP1); + + if (queue == 0 || queue == 4) { + value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; + value |= MTL_RXQ_DMA_Q04MDMACH(chan); + } else if (queue > 4) { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4); + } else { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); + } + + if (queue < 4) + writel(value, ioaddr + MTL_RXQ_DMA_MAP0); + else + writel(value, ioaddr + MTL_RXQ_DMA_MAP1); +} + +static void dwmac4_config_cbs(struct mac_device_info *hw, + u32 send_slope, 
u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + pr_debug("Queue %d configured as AVB. Parameters:\n", queue); + pr_debug("\tsend_slope: 0x%08x\n", send_slope); + pr_debug("\tidle_slope: 0x%08x\n", idle_slope); + pr_debug("\thigh_credit: 0x%08x\n", high_credit); + pr_debug("\tlow_credit: 0x%08x\n", low_credit); + + /* enable AV algorithm */ + value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); + value |= MTL_ETS_CTRL_AVALG; + value |= MTL_ETS_CTRL_CC; + writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); + + /* configure send slope */ + value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); + value &= ~MTL_SEND_SLP_CRED_SSC_MASK; + value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK; + writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); + + /* configure idle slope (same register as tx weight) */ + dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue); + + /* configure high credit */ + value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); + value &= ~MTL_HIGH_CRED_HC_MASK; + value |= high_credit & MTL_HIGH_CRED_HC_MASK; + writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); + + /* configure high credit */ + value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); + value &= ~MTL_HIGH_CRED_LC_MASK; + value |= low_credit & MTL_HIGH_CRED_LC_MASK; + writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); +} + +static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < GMAC_REG_NUM; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + + if (hw->rx_csum) + value |= GMAC_CONFIG_IPC; + else + value &= ~GMAC_CONFIG_IPC; + + writel(value, ioaddr + GMAC_CONFIG); + + value = readl(ioaddr + GMAC_CONFIG); + + return !!(value & GMAC_CONFIG_IPC); +} + +static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int pmt = 0; + u32 config; + + if (mode & WAKE_MAGIC) { + pr_debug("GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } + if (mode & WAKE_UCAST) { + pr_debug("GMAC: WOL on global unicast\n"); + pmt |= power_down | global_unicast | wake_up_frame_en; + } + + if (pmt) { + /* The receiver must be enabled for WOL before powering down */ + config = readl(ioaddr + GMAC_CONFIG); + config |= GMAC_CONFIG_RE; + writel(config, ioaddr + GMAC_CONFIG); + } + writel(pmt, ioaddr + GMAC_PMT); +} + +static void dwmac4_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. 
+ */ + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + if (en_tx_lpi_clockgating) + value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (link) + value |= GMAC4_LPI_CTRL_STATUS_PLS; + else + value &= ~GMAC4_LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et) +{ + void __iomem *ioaddr = hw->pcsr; + int value = et & STMMAC_ET_MAX; + int regval; + + /* Program LPI entry timer value into register */ + writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER); + + /* Enable/disable LPI entry timer */ + regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + if (et) + regval |= GMAC4_LPI_CTRL_STATUS_LPIATE; + else + regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE; + + writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + +static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. 
+ */ + writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); +} + +static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 val; + + val = readl(ioaddr + GMAC_VLAN_TAG); + val &= ~GMAC_VLAN_TAG_VID; + val |= GMAC_VLAN_TAG_ETV | vid; + + writel(val, ioaddr + GMAC_VLAN_TAG); +} + +static int dwmac4_write_vlan_filter(struct net_device *dev, + struct mac_device_info *hw, + u8 index, u32 data) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + int i, timeout = 10; + u32 val; + + if (index >= hw->num_vlan) + return -EINVAL; + + writel(data, ioaddr + GMAC_VLAN_TAG_DATA); + + val = readl(ioaddr + GMAC_VLAN_TAG); + val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK | + GMAC_VLAN_TAG_CTRL_CT | + GMAC_VLAN_TAG_CTRL_OB); + val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB; + + writel(val, ioaddr + GMAC_VLAN_TAG); + + for (i = 0; i < timeout; i++) { + val = readl(ioaddr + GMAC_VLAN_TAG); + if (!(val & GMAC_VLAN_TAG_CTRL_OB)) + return 0; + udelay(1); + } + + netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n"); + + return -EBUSY; +} + +static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int index = -1; + u32 val = 0; + int i, ret; + + if (vid > 4095) + return -EINVAL; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + /* For single VLAN filter, VID 0 means VLAN promiscuous */ + if (vid == 0) { + netdev_warn(dev, "Adding VLAN ID 0 is not supported\n"); + return -EPERM; + } + + if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) { + netdev_err(dev, "Only single VLAN ID supported\n"); + return -EPERM; + } + + hw->vlan_filter[0] = vid; + dwmac4_write_single_vlan(dev, vid); + + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid; + + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] == val) + return 0; + else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN)) + index = i; + } + + if (index == -1) { + netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n", + hw->num_vlan); + return -EPERM; + } + + ret = dwmac4_write_vlan_filter(dev, hw, index, val); + + if (!ret) + hw->vlan_filter[index] = val; + + return ret; +} + +static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int i, ret = 0; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) { + hw->vlan_filter[0] = 0; + dwmac4_write_single_vlan(dev, 0); + } + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) { + ret = dwmac4_write_vlan_filter(dev, hw, i, 0); + + if (!ret) + hw->vlan_filter[i] = 0; + else + return ret; + } + } + + return ret; +} + +static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev, + struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + u32 hash; + u32 val; + int i; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + dwmac4_write_single_vlan(dev, hw->vlan_filter[0]); + return; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) { + val = hw->vlan_filter[i]; + dwmac4_write_vlan_filter(dev, hw, i, val); + } + } + + hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE); + if (hash & GMAC_VLAN_VLHT) { + value = readl(ioaddr + GMAC_VLAN_TAG); + value |= 
GMAC_VLAN_VTHM; + writel(value, ioaddr + GMAC_VLAN_TAG); + } +} + +static void dwmac4_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + int numhashregs = (hw->multicast_filter_bins >> 5); + int mcbitslog2 = hw->mcast_bits_log2; + unsigned int value; + u32 mc_filter[8]; + int i; + + memset(mc_filter, 0, sizeof(mc_filter)); + + value = readl(ioaddr + GMAC_PACKET_FILTER); + value &= ~GMAC_PACKET_FILTER_HMC; + value &= ~GMAC_PACKET_FILTER_HPF; + value &= ~GMAC_PACKET_FILTER_PCF; + value &= ~GMAC_PACKET_FILTER_PM; + value &= ~GMAC_PACKET_FILTER_PR; + value &= ~GMAC_PACKET_FILTER_RA; + if (dev->flags & IFF_PROMISC) { + /* VLAN Tag Filter Fail Packets Queuing */ + if (hw->vlan_fail_q_en) { + value = readl(ioaddr + GMAC_RXQ_CTRL4); + value &= ~GMAC_RXQCTRL_VFFQ_MASK; + value |= GMAC_RXQCTRL_VFFQE | + (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT); + writel(value, ioaddr + GMAC_RXQ_CTRL4); + value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA; + } else { + value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF; + } + + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > hw->multicast_filter_bins)) { + /* Pass all multi */ + value |= GMAC_PACKET_FILTER_PM; + /* Set all the bits of the HASH tab */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ + value |= GMAC_PACKET_FILTER_HMC; + + netdev_for_each_mc_addr(ha, dev) { + /* The upper n bits of the calculated CRC are used to + * index the contents of the hash table. The number of + * bits used depends on the hardware configuration + * selected at core configuration time. + */ + u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr, + ETH_ALEN)) >> (32 - mcbitslog2); + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. 
+ */ + mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f)); + } + } + + for (i = 0; i < numhashregs; i++) + writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i)); + + value |= GMAC_PACKET_FILTER_HPF; + + /* Handle multiple unicast addresses */ + if (netdev_uc_count(dev) > hw->unicast_filter_entries) { + /* Switch to promiscuous mode if more than 128 addrs + * are required + */ + value |= GMAC_PACKET_FILTER_PR; + } else { + struct netdev_hw_addr *ha; + int reg = 1; + + netdev_for_each_uc_addr(ha, dev) { + dwmac4_set_umac_addr(hw, ha->addr, reg); + reg++; + } + + while (reg < GMAC_MAX_PERFECT_ADDRESSES) { + writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); + writel(0, ioaddr + GMAC_ADDR_LOW(reg)); + reg++; + } + } + + /* VLAN filtering */ + if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) + value &= ~GMAC_PACKET_FILTER_VTFE; + else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + value |= GMAC_PACKET_FILTER_VTFE; + + writel(value, ioaddr + GMAC_PACKET_FILTER); +} + +static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int flow = 0; + u32 queue = 0; + + pr_debug("GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + pr_debug("\tReceive Flow-Control ON\n"); + flow |= GMAC_RX_FLOW_CTRL_RFE; + } else { + pr_debug("\tReceive Flow-Control OFF\n"); + } + writel(flow, ioaddr + GMAC_RX_FLOW_CTRL); + + if (fc & FLOW_TX) { + pr_debug("\tTransmit Flow-Control ON\n"); + + if (duplex) + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); + + for (queue = 0; queue < tx_cnt; queue++) { + flow = GMAC_TX_FLOW_CTRL_TFE; + + if (duplex) + flow |= + (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); + + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); + } + } else { + for (queue = 0; queue < tx_cnt; queue++) + writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); + } +} + +static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) +{ + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); +} + +static void dwmac4_rane(void __iomem *ioaddr, bool restart) +{ + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} + +static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); +} + +/* RGMII or SMII interface */ +static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 status; + + status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS); + x->irq_rgmii_n++; + + /* Check the link status */ + if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) { + int speed_value; + + x->pcs_link = 1; + + speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >> + GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT); + if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK); + + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? 
"Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } +} + +static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 mtl_int_qx_status; + int ret = 0; + + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); + + /* Check MTL Interrupt */ + if (mtl_int_qx_status & MTL_INT_QX(chan)) { + /* read Queue x Interrupt status */ + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan)); + + if (status & MTL_RX_OVERFLOW_INT) { + /* clear Interrupt */ + writel(status | MTL_RX_OVERFLOW_INT, + ioaddr + MTL_CHAN_INT_CTRL(chan)); + ret = CORE_IRQ_MTL_RX_OVERFLOW; + } + } + + return ret; +} + +static int dwmac4_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + u32 intr_enable = readl(ioaddr + GMAC_INT_EN); + int ret = 0; + + /* Discard disabled bits */ + intr_status &= intr_enable; + + /* Not used events (e.g. MMC interrupts) are not handled. */ + if ((intr_status & mmc_tx_irq)) + x->mmc_tx_irq_n++; + if (unlikely(intr_status & mmc_rx_irq)) + x->mmc_rx_irq_n++; + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + x->mmc_rx_csum_offload_irq_n++; + /* Clear the PMT bits 5 and 6 by reading the PMT status reg */ + if (unlikely(intr_status & pmt_irq)) { + readl(ioaddr + GMAC_PMT); + x->irq_receive_pmt_irq_n++; + } + + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & lpi_irq) { + /* Clear LPI interrupt by reading MAC_LPI_Control_Status */ + u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) { + ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; + x->irq_tx_path_in_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) { + ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; + x->irq_tx_path_exit_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac4_phystatus(ioaddr, x); + + return ret; +} + +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues) +{ + u32 value; + u32 queue; + + for (queue = 0; queue < tx_queues; queue++) { + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue)); + + if (value & MTL_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & MTL_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & MTL_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & MTL_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) + >> MTL_DEBUG_TRCSTS_SHIFT; + if (trcsts == MTL_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == MTL_DEBUG_TRCSTS_TXW) + x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == MTL_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; + } + + for (queue = 0; queue < rx_queues; queue++) { + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue)); + + if (value & MTL_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) + >> MTL_DEBUG_RRCSTS_SHIFT; + + if (rxfsts == MTL_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == MTL_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == MTL_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + 
x->mtl_rx_fifo_fill_level_empty++; + } + if (value & MTL_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> + MTL_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; + } + + /* GMAC debug */ + value = readl(ioaddr + GMAC_DEBUG); + + if (value & GMAC_DEBUG_TFCSTS_MASK) { + u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK) + >> GMAC_DEBUG_TFCSTS_SHIFT; + + if (tfcsts == GMAC_DEBUG_TFCSTS_XFER) + x->mac_tx_frame_ctrl_xfer++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE) + x->mac_tx_frame_ctrl_pause++; + else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT) + x->mac_tx_frame_ctrl_wait++; + else + x->mac_tx_frame_ctrl_idle++; + } + if (value & GMAC_DEBUG_TPESTS) + x->mac_gmii_tx_proto_engine++; + if (value & GMAC_DEBUG_RFCFCSTS_MASK) + x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK) + >> GMAC_DEBUG_RFCFCSTS_SHIFT; + if (value & GMAC_DEBUG_RPESTS) + x->mac_gmii_rx_proto_engine++; +} + +static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + + if (enable) + value |= GMAC_CONFIG_LM; + else + value &= ~GMAC_CONFIG_LM; + + writel(value, ioaddr + GMAC_CONFIG); +} + +static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, + __le16 perfect_match, bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE); + + value = readl(ioaddr + GMAC_VLAN_TAG); + + if (hash) { + value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV; + if (is_double) { + value |= GMAC_VLAN_EDVLP; + value |= GMAC_VLAN_ESVL; + value |= GMAC_VLAN_DOVLTC; + } + + writel(value, ioaddr + GMAC_VLAN_TAG); + } else if (perfect_match) { + u32 value = GMAC_VLAN_ETV; + + if (is_double) { + value |= GMAC_VLAN_EDVLP; + value |= GMAC_VLAN_ESVL; + value |= GMAC_VLAN_DOVLTC; + } + + writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG); + } else { + value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV); + value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); + value &= ~GMAC_VLAN_DOVLTC; + value &= ~GMAC_VLAN_VID; + + writel(value, ioaddr + GMAC_VLAN_TAG); + } +} + +static void dwmac4_sarc_configure(void __iomem *ioaddr, int val) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + + value &= ~GMAC_CONFIG_SARC; + value |= val << GMAC_CONFIG_SARC_SHIFT; + + writel(value, ioaddr + GMAC_CONFIG); +} + +static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC_VLAN_INCL); + value |= GMAC_VLAN_VLTI; + value |= GMAC_VLAN_CSVL; /* Only use SVLAN */ + value &= ~GMAC_VLAN_VLC; + value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC; + writel(value, ioaddr + GMAC_VLAN_INCL); +} + +static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en, + u32 addr) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(addr, ioaddr + GMAC_ARP_ADDR); + + value = readl(ioaddr + GMAC_CONFIG); + if (en) + value |= GMAC_CONFIG_ARPEN; + else + value &= ~GMAC_CONFIG_ARPEN; + writel(value, ioaddr + GMAC_CONFIG); +} + +static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool ipv6, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + 
GMAC_PACKET_FILTER); + value |= GMAC_PACKET_FILTER_IPFE; + writel(value, ioaddr + GMAC_PACKET_FILTER); + + value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no)); + + /* For IPv6 not both SA/DA filters can be active */ + if (ipv6) { + value |= GMAC_L3PEN0; + value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0); + value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0); + if (sa) { + value |= GMAC_L3SAM0; + if (inv) + value |= GMAC_L3SAIM0; + } else { + value |= GMAC_L3DAM0; + if (inv) + value |= GMAC_L3DAIM0; + } + } else { + value &= ~GMAC_L3PEN0; + if (sa) { + value |= GMAC_L3SAM0; + if (inv) + value |= GMAC_L3SAIM0; + } else { + value |= GMAC_L3DAM0; + if (inv) + value |= GMAC_L3DAIM0; + } + } + + writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + if (sa) { + writel(match, ioaddr + GMAC_L3_ADDR0(filter_no)); + } else { + writel(match, ioaddr + GMAC_L3_ADDR1(filter_no)); + } + + if (!en) + writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + return 0; +} + +static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool udp, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + GMAC_PACKET_FILTER); + value |= GMAC_PACKET_FILTER_IPFE; + writel(value, ioaddr + GMAC_PACKET_FILTER); + + value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no)); + if (udp) { + value |= GMAC_L4PEN0; + } else { + value &= ~GMAC_L4PEN0; + } + + value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0); + value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0); + if (sa) { + value |= GMAC_L4SPM0; + if (inv) + value |= GMAC_L4SPIM0; + } else { + value |= GMAC_L4DPM0; + if (inv) + value |= GMAC_L4DPIM0; + } + + writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + if (sa) { + value = match & GMAC_L4SP0; + } else { + value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0; + } + + writel(value, ioaddr + GMAC_L4_ADDR(filter_no)); + + if (!en) + writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no)); + + return 0; +} + +const struct stmmac_ops dwmac4_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_rx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, + .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash = dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, + .config_l3_filter = dwmac4_config_l3_filter, + .config_l4_filter = dwmac4_config_l4_filter, + .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, + 
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, +}; + +const struct stmmac_ops dwmac410_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_rx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, + .flex_pps_config = dwmac5_flex_pps_config, + .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash = dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, + .config_l3_filter = dwmac4_config_l3_filter, + .config_l4_filter = dwmac4_config_l4_filter, + .est_configure = dwmac5_est_configure, + .est_irq_status = dwmac5_est_irq_status, + .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, + .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, + .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, +}; + +const struct stmmac_ops dwmac510_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_rx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, + .safety_feat_config = dwmac5_safety_feat_config, + .safety_feat_irq_status = dwmac5_safety_feat_irq_status, + .safety_feat_dump = dwmac5_safety_feat_dump, + .rxp_config = dwmac5_rxp_config, + .flex_pps_config = dwmac5_flex_pps_config, + .set_mac_loopback = dwmac4_set_mac_loopback, + .update_vlan_hash 
= dwmac4_update_vlan_hash, + .sarc_configure = dwmac4_sarc_configure, + .enable_vlan = dwmac4_enable_vlan, + .set_arp_offload = dwmac4_set_arp_offload, + .config_l3_filter = dwmac4_config_l3_filter, + .config_l4_filter = dwmac4_config_l4_filter, + .est_configure = dwmac5_est_configure, + .est_irq_status = dwmac5_est_irq_status, + .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, + .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, + .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, + .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, +}; + +static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) +{ + u32 val, num_vlan; + + val = readl(ioaddr + GMAC_HW_FEATURE3); + switch (val & GMAC_HW_FEAT_NRVF) { + case 0: + num_vlan = 1; + break; + case 1: + num_vlan = 4; + break; + case 2: + num_vlan = 8; + break; + case 3: + num_vlan = 16; + break; + case 4: + num_vlan = 24; + break; + case 5: + num_vlan = 32; + break; + default: + num_vlan = 1; + } + + return num_vlan; +} + +int dwmac4_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tDWMAC4/5\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = GMAC_CONFIG_DM; + mac->link.speed10 = GMAC_CONFIG_PS; + mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS; + mac->link.speed1000 = 0; + mac->link.speed2500 = GMAC_CONFIG_FES; + mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS; + mac->mii.addr = GMAC_MDIO_ADDR; + mac->mii.data = GMAC_MDIO_DATA; + mac->mii.addr_shift = 21; + mac->mii.addr_mask = GENMASK(25, 21); + mac->mii.reg_shift = 16; + mac->mii.reg_mask = GENMASK(20, 16); + mac->mii.clk_csr_shift = 8; + mac->mii.clk_csr_mask = GENMASK(11, 8); + mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr); + + return 0; +} diff --git a/devices/stmmac/dwmac4_descs-6.1-ethercat.c b/devices/stmmac/dwmac4_descs-6.1-ethercat.c new file mode 100644 index 00000000..cc429ff5 --- /dev/null +++ b/devices/stmmac/dwmac4_descs-6.1-ethercat.c @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This contains the functions to handle the descriptors for DesignWare databook + * 4.xx. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include "common-6.1-ethercat.h" +#include "dwmac4-6.1-ethercat.h" +#include "dwmac4_descs-6.1-ethercat.h" + +static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, + void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes3; + int ret = tx_done; + + tdes3 = le32_to_cpu(p->des3); + + /* Get tx owner first */ + if (unlikely(tdes3 & TDES3_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. 
*/ + if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR))) + return tx_not_ls; + + if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) { + ret = tx_err; + + if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT)) + x->tx_jabber++; + if (unlikely(tdes3 & TDES3_PACKET_FLUSHED)) + x->tx_frame_flushed++; + if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes3 & TDES3_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes3 & TDES3_LATE_COLLISION) || + (tdes3 & TDES3_EXCESSIVE_COLLISION))) + stats->collisions += + (tdes3 & TDES3_COLLISION_COUNT_MASK) + >> TDES3_COLLISION_COUNT_SHIFT; + + if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL)) + x->tx_deferred++; + + if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) { + x->tx_underflow++; + ret |= tx_err_bump_tc; + } + + if (unlikely(tdes3 & TDES3_IP_HDR_ERROR)) + x->tx_ip_header_error++; + + if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR)) + x->tx_payload_error++; + } + + if (unlikely(tdes3 & TDES3_DEFERRED)) + x->tx_deferred++; + + return ret; +} + +static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes2 = le32_to_cpu(p->des2); + unsigned int rdes3 = le32_to_cpu(p->des3); + int message_type; + int ret = good_frame; + + if (unlikely(rdes3 & RDES3_OWN)) + return dma_own; + + if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR)) + return discard_frame; + if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR))) + return rx_not_ls; + + if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) { + if (unlikely(rdes3 & RDES3_GIANT_PACKET)) + stats->rx_length_errors++; + if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR)) + x->rx_gmac_overflow++; + + if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG)) + x->rx_watchdog++; + + if (unlikely(rdes3 & RDES3_RECEIVE_ERROR)) + x->rx_mii++; + + if (unlikely(rdes3 & RDES3_CRC_ERROR)) { + x->rx_crc_errors++; + stats->rx_crc_errors++; + } + + if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR)) + x->dribbling_bit++; + + ret = discard_frame; + } + + message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes1 & RDES1_IP_HDR_ERROR) + x->ip_hdr_err++; + if (rdes1 & RDES1_IP_CSUM_BYPASSED) + x->ip_csum_bypassed++; + if (rdes1 & RDES1_IPV4_HEADER) + x->ipv4_pkt_rcvd++; + if (rdes1 & RDES1_IPV6_HEADER) + x->ipv6_pkt_rcvd++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; + else if (message_type == RDES_EXT_FOLLOW_UP) + x->ptp_rx_msg_type_follow_up++; + else if (message_type == RDES_EXT_DELAY_REQ) + x->ptp_rx_msg_type_delay_req++; + else if (message_type == RDES_EXT_DELAY_RESP) + x->ptp_rx_msg_type_delay_resp++; + else if (message_type == RDES_EXT_PDELAY_REQ) + x->ptp_rx_msg_type_pdelay_req++; + else if (message_type == RDES_EXT_PDELAY_RESP) + x->ptp_rx_msg_type_pdelay_resp++; + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++; + + if (rdes1 & RDES1_PTP_PACKET_TYPE) + x->ptp_frame_type++; + if (rdes1 & RDES1_PTP_VER) + x->ptp_ver++; + if (rdes1 & RDES1_TIMESTAMP_DROPPED) + x->timestamp_dropped++; + + if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) { + 
x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) { + x->da_rx_filter_fail++; + ret = discard_frame; + } + + if (rdes2 & RDES2_L3_FILTER_MATCH) + x->l3_filter_match++; + if (rdes2 & RDES2_L4_FILTER_MATCH) + x->l4_filter_match++; + if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK) + >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT) + x->l3_l4_filter_no_match++; + + return ret; +} + +static int dwmac4_rd_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK); +} + +static int dwmac4_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT; +} + +static void dwmac4_set_tx_owner(struct dma_desc *p) +{ + p->des3 |= cpu_to_le32(TDES3_OWN); +} + +static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); + + if (!disable_rx_ic) + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); +} + +static int dwmac4_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR) + >> TDES3_LAST_DESCRIPTOR_SHIFT; +} + +static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe) +{ + return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK); +} + +static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE); +} + +static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) +{ + /* Context type from W/B descriptor must be zero */ + if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) + return 0; + + /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ + if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) + return 1; + + return 0; +} + +static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = le32_to_cpu(p->des0); + /* convert high/sec time stamp value to nanosecond */ + ns += le32_to_cpu(p->des1) * 1000000000ULL; + + *ts = ns; +} + +static int dwmac4_rx_check_timestamp(void *desc) +{ + struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes0 = le32_to_cpu(p->des0); + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes3 = le32_to_cpu(p->des3); + u32 own, ctxt; + int ret = 1; + + own = rdes3 & RDES3_OWN; + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) + >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); + + if (likely(!own && ctxt)) { + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) + /* Corrupted value */ + ret = -EINVAL; + else + /* A valid Timestamp is ready to be read */ + ret = 0; + } + + /* Timestamp not ready */ + return ret; +} + +static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + int ret = -EINVAL; + + /* Get the status from normal w/b descriptor */ + if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) { + if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) { + int i = 0; + + /* Check if timestamp is OK from context descriptor */ + do { + ret = dwmac4_rx_check_timestamp(next_desc); + if (ret < 0) + goto exit; + i++; + + } while ((ret == 1) && (i < 10)); + + if (i == 10) + ret = -EBUSY; + } + } +exit: + if (likely(ret == 0)) + return 1; + + return 0; +} + +static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end, int bfsize) +{ + dwmac4_set_rx_owner(p, disable_rx_ic); +} + +static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 
0; + p->des3 = 0; +} + +static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK); + + tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK; + if (is_fs) + tdes3 |= TDES3_FIRST_DESCRIPTOR; + else + tdes3 &= ~TDES3_FIRST_DESCRIPTOR; + + if (likely(csum_flag)) + tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT); + else + tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT); + + if (ls) + tdes3 |= TDES3_LAST_DESCRIPTOR; + else + tdes3 &= ~TDES3_LAST_DESCRIPTOR; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, + int len1, int len2, bool tx_own, + bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + if (len1) + p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK)); + + if (len2) + p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT) + & TDES2_BUFFER2_SIZE_MASK); + + if (is_fs) { + tdes3 |= TDES3_FIRST_DESCRIPTOR | + TDES3_TCP_SEGMENTATION_ENABLE | + ((tcphdrlen << TDES3_HDR_LEN_SHIFT) & + TDES3_SLOT_NUMBER_MASK) | + ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK)); + } else { + tdes3 &= ~TDES3_FIRST_DESCRIPTOR; + } + + if (ls) + tdes3 |= TDES3_LAST_DESCRIPTOR; + else + tdes3 &= ~TDES3_LAST_DESCRIPTOR; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwmac4_release_tx_desc(struct dma_desc *p, int mode) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwmac4_rd_set_tx_ic(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION); +} + +static void dwmac4_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) +{ + dma_addr_t dma_addr; + int i; + + pr_info("%s descriptor ring:\n", rx ? 
"RX" : "TX"); + + if (desc_size == sizeof(struct dma_desc)) { + struct dma_desc *p = (struct dma_desc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*p); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; + } + } else if (desc_size == sizeof(struct dma_extended_desc)) { + struct dma_extended_desc *extp = (struct dma_extended_desc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*extp); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1), + le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3), + le32_to_cpu(extp->des4), le32_to_cpu(extp->des5), + le32_to_cpu(extp->des6), le32_to_cpu(extp->des7)); + extp++; + } + } else if (desc_size == sizeof(struct dma_edesc)) { + struct dma_edesc *ep = (struct dma_edesc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*ep); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(ep->des4), le32_to_cpu(ep->des5), + le32_to_cpu(ep->des6), le32_to_cpu(ep->des7), + le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1), + le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3)); + ep++; + } + } else { + pr_err("unsupported descriptor!"); + } +} + +static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = cpu_to_le32(mss); + p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); +} + +static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des0 = cpu_to_le32(lower_32_bits(addr)); + p->des1 = cpu_to_le32(upper_32_bits(addr)); +} + +static void dwmac4_clear(struct dma_desc *p) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type) +{ + sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT; + + p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK); +} + +static int set_16kib_bfsize(int mtu) +{ + int ret = 0; + + if (unlikely(mtu >= BUF_SIZE_8KiB)) + ret = BUF_SIZE_16KiB; + return ret; +} + +static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + + /* Inner VLAN */ + if (inner_type) { + u32 des = inner_tag << TDES2_IVT_SHIFT; + + des &= TDES2_IVT_MASK; + p->des2 = cpu_to_le32(des); + + des = inner_type << TDES3_IVTIR_SHIFT; + des &= TDES3_IVTIR_MASK; + p->des3 = cpu_to_le32(des | TDES3_IVLTV); + } + + /* Outer VLAN */ + p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG); + p->des3 |= cpu_to_le32(TDES3_VLTV); + + p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE); +} + +static void dwmac4_set_vlan(struct dma_desc *p, u32 type) +{ + type <<= TDES2_VLAN_TAG_SHIFT; + p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK); +} + +static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len) +{ + *len = le32_to_cpu(p->des2) & RDES2_HL; +} + +static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid) +{ + p->des2 = cpu_to_le32(lower_32_bits(addr)); + p->des3 = cpu_to_le32(upper_32_bits(addr)); + + if (buf2_valid) + p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR); + else + p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR); +} + +static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec) +{ + p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV); + 
p->des5 = cpu_to_le32(nsec & TDES5_LT); + p->des6 = 0; + p->des7 = 0; +} + +const struct stmmac_desc_ops dwmac4_desc_ops = { + .tx_status = dwmac4_wrback_get_tx_status, + .rx_status = dwmac4_wrback_get_rx_status, + .get_tx_len = dwmac4_rd_get_tx_len, + .get_tx_owner = dwmac4_get_tx_owner, + .set_tx_owner = dwmac4_set_tx_owner, + .set_rx_owner = dwmac4_set_rx_owner, + .get_tx_ls = dwmac4_get_tx_ls, + .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, + .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, + .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, + .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, + .get_timestamp = dwmac4_get_timestamp, + .set_tx_ic = dwmac4_rd_set_tx_ic, + .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, + .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, + .release_tx_desc = dwmac4_release_tx_desc, + .init_rx_desc = dwmac4_rd_init_rx_desc, + .init_tx_desc = dwmac4_rd_init_tx_desc, + .display_ring = dwmac4_display_ring, + .set_mss = dwmac4_set_mss_ctxt, + .set_addr = dwmac4_set_addr, + .clear = dwmac4_clear, + .set_sarc = dwmac4_set_sarc, + .set_vlan_tag = dwmac4_set_vlan_tag, + .set_vlan = dwmac4_set_vlan, + .get_rx_header_len = dwmac4_get_rx_header_len, + .set_sec_addr = dwmac4_set_sec_addr, + .set_tbs = dwmac4_set_tbs, +}; + +const struct stmmac_mode_ops dwmac4_ring_mode_ops = { + .set_16kib_bfsize = set_16kib_bfsize, +}; diff --git a/devices/stmmac/dwmac4_descs-6.1-ethercat.h b/devices/stmmac/dwmac4_descs-6.1-ethercat.h new file mode 100644 index 00000000..6da070cc --- /dev/null +++ b/devices/stmmac/dwmac4_descs-6.1-ethercat.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Header File to describe the DMA descriptors and related definitions specific + * for DesignWare databook 4.xx. 
+ * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_DESCS_H__ +#define __DWMAC4_DESCS_H__ + +#include + +/* Normal transmit descriptor defines (without split feature) */ + +/* TDES2 (read format) */ +#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0) +#define TDES2_VLAN_TAG_MASK GENMASK(15, 14) +#define TDES2_VLAN_TAG_SHIFT 14 +#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16) +#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16 +#define TDES3_IVTIR_MASK GENMASK(19, 18) +#define TDES3_IVTIR_SHIFT 18 +#define TDES3_IVLTV BIT(17) +#define TDES2_TIMESTAMP_ENABLE BIT(30) +#define TDES2_IVT_MASK GENMASK(31, 16) +#define TDES2_IVT_SHIFT 16 +#define TDES2_INTERRUPT_ON_COMPLETION BIT(31) + +/* TDES3 (read format) */ +#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define TDES3_VLAN_TAG GENMASK(15, 0) +#define TDES3_VLTV BIT(16) +#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16) +#define TDES3_CHECKSUM_INSERTION_SHIFT 16 +#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0) +#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18) +#define TDES3_HDR_LEN_SHIFT 19 +#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19) +#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23) +#define TDES3_SA_INSERT_CTRL_SHIFT 23 +#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26) + +/* TDES3 (write back format) */ +#define TDES3_IP_HDR_ERROR BIT(0) +#define TDES3_DEFERRED BIT(1) +#define TDES3_UNDERFLOW_ERROR BIT(2) +#define TDES3_EXCESSIVE_DEFERRAL BIT(3) +#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4) +#define TDES3_COLLISION_COUNT_SHIFT 4 +#define TDES3_EXCESSIVE_COLLISION BIT(8) +#define TDES3_LATE_COLLISION BIT(9) +#define TDES3_NO_CARRIER BIT(10) +#define TDES3_LOSS_CARRIER BIT(11) +#define TDES3_PAYLOAD_ERROR BIT(12) +#define TDES3_PACKET_FLUSHED BIT(13) +#define TDES3_JABBER_TIMEOUT BIT(14) +#define TDES3_ERROR_SUMMARY BIT(15) +#define TDES3_TIMESTAMP_STATUS BIT(17) +#define TDES3_TIMESTAMP_STATUS_SHIFT 17 + +/* TDES3 context */ +#define TDES3_CTXT_TCMSSV BIT(26) + +/* TDES3 Common */ +#define TDES3_RS1V BIT(26) +#define TDES3_RS1V_SHIFT 26 +#define TDES3_LAST_DESCRIPTOR BIT(28) +#define TDES3_LAST_DESCRIPTOR_SHIFT 28 +#define TDES3_FIRST_DESCRIPTOR BIT(29) +#define TDES3_CONTEXT_TYPE BIT(30) +#define TDES3_CONTEXT_TYPE_SHIFT 30 + +/* TDES4 */ +#define TDES4_LTV BIT(31) +#define TDES4_LT GENMASK(7, 0) + +/* TDES5 */ +#define TDES5_LT GENMASK(31, 8) + +/* TDS3 use for both format (read and write back) */ +#define TDES3_OWN BIT(31) +#define TDES3_OWN_SHIFT 31 + +/* Normal receive descriptor defines (without split feature) */ + +/* RDES0 (write back format) */ +#define RDES0_VLAN_TAG_MASK GENMASK(15, 0) + +/* RDES1 (write back format) */ +#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0) +#define RDES1_IP_HDR_ERROR BIT(3) +#define RDES1_IPV4_HEADER BIT(4) +#define RDES1_IPV6_HEADER BIT(5) +#define RDES1_IP_CSUM_BYPASSED BIT(6) +#define RDES1_IP_CSUM_ERROR BIT(7) +#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8) +#define RDES1_PTP_PACKET_TYPE BIT(12) +#define RDES1_PTP_VER BIT(13) +#define RDES1_TIMESTAMP_AVAILABLE BIT(14) +#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14 +#define RDES1_TIMESTAMP_DROPPED BIT(15) +#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16) + +/* RDES2 (write back format) */ +#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0) +#define RDES2_VLAN_FILTER_STATUS BIT(15) +#define RDES2_SA_FILTER_FAIL BIT(16) +#define RDES2_DA_FILTER_FAIL BIT(17) +#define RDES2_HASH_FILTER_STATUS BIT(18) +#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19) +#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19) 
+#define RDES2_L3_FILTER_MATCH BIT(27) +#define RDES2_L4_FILTER_MATCH BIT(28) +#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26) +#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26 +#define RDES2_HL GENMASK(9, 0) + +/* RDES3 (write back format) */ +#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define RDES3_ERROR_SUMMARY BIT(15) +#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16) +#define RDES3_DRIBBLE_ERROR BIT(19) +#define RDES3_RECEIVE_ERROR BIT(20) +#define RDES3_OVERFLOW_ERROR BIT(21) +#define RDES3_RECEIVE_WATCHDOG BIT(22) +#define RDES3_GIANT_PACKET BIT(23) +#define RDES3_CRC_ERROR BIT(24) +#define RDES3_RDES0_VALID BIT(25) +#define RDES3_RDES1_VALID BIT(26) +#define RDES3_RDES2_VALID BIT(27) +#define RDES3_LAST_DESCRIPTOR BIT(28) +#define RDES3_FIRST_DESCRIPTOR BIT(29) +#define RDES3_CONTEXT_DESCRIPTOR BIT(30) +#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30 + +/* RDES3 (read format) */ +#define RDES3_BUFFER1_VALID_ADDR BIT(24) +#define RDES3_BUFFER2_VALID_ADDR BIT(25) +#define RDES3_INT_ON_COMPLETION_EN BIT(30) + +/* TDS3 use for both format (read and write back) */ +#define RDES3_OWN BIT(31) + +#endif /* __DWMAC4_DESCS_H__ */ diff --git a/devices/stmmac/dwmac4_descs-6.1-orig.c b/devices/stmmac/dwmac4_descs-6.1-orig.c new file mode 100644 index 00000000..8cc80b1d --- /dev/null +++ b/devices/stmmac/dwmac4_descs-6.1-orig.c @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This contains the functions to handle the descriptors for DesignWare databook + * 4.xx. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include "common.h" +#include "dwmac4.h" +#include "dwmac4_descs.h" + +static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, + void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes3; + int ret = tx_done; + + tdes3 = le32_to_cpu(p->des3); + + /* Get tx owner first */ + if (unlikely(tdes3 & TDES3_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. 
*/ + if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR))) + return tx_not_ls; + + if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) { + ret = tx_err; + + if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT)) + x->tx_jabber++; + if (unlikely(tdes3 & TDES3_PACKET_FLUSHED)) + x->tx_frame_flushed++; + if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes3 & TDES3_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes3 & TDES3_LATE_COLLISION) || + (tdes3 & TDES3_EXCESSIVE_COLLISION))) + stats->collisions += + (tdes3 & TDES3_COLLISION_COUNT_MASK) + >> TDES3_COLLISION_COUNT_SHIFT; + + if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL)) + x->tx_deferred++; + + if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) { + x->tx_underflow++; + ret |= tx_err_bump_tc; + } + + if (unlikely(tdes3 & TDES3_IP_HDR_ERROR)) + x->tx_ip_header_error++; + + if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR)) + x->tx_payload_error++; + } + + if (unlikely(tdes3 & TDES3_DEFERRED)) + x->tx_deferred++; + + return ret; +} + +static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes2 = le32_to_cpu(p->des2); + unsigned int rdes3 = le32_to_cpu(p->des3); + int message_type; + int ret = good_frame; + + if (unlikely(rdes3 & RDES3_OWN)) + return dma_own; + + if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR)) + return discard_frame; + if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR))) + return rx_not_ls; + + if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) { + if (unlikely(rdes3 & RDES3_GIANT_PACKET)) + stats->rx_length_errors++; + if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR)) + x->rx_gmac_overflow++; + + if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG)) + x->rx_watchdog++; + + if (unlikely(rdes3 & RDES3_RECEIVE_ERROR)) + x->rx_mii++; + + if (unlikely(rdes3 & RDES3_CRC_ERROR)) { + x->rx_crc_errors++; + stats->rx_crc_errors++; + } + + if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR)) + x->dribbling_bit++; + + ret = discard_frame; + } + + message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes1 & RDES1_IP_HDR_ERROR) + x->ip_hdr_err++; + if (rdes1 & RDES1_IP_CSUM_BYPASSED) + x->ip_csum_bypassed++; + if (rdes1 & RDES1_IPV4_HEADER) + x->ipv4_pkt_rcvd++; + if (rdes1 & RDES1_IPV6_HEADER) + x->ipv6_pkt_rcvd++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; + else if (message_type == RDES_EXT_FOLLOW_UP) + x->ptp_rx_msg_type_follow_up++; + else if (message_type == RDES_EXT_DELAY_REQ) + x->ptp_rx_msg_type_delay_req++; + else if (message_type == RDES_EXT_DELAY_RESP) + x->ptp_rx_msg_type_delay_resp++; + else if (message_type == RDES_EXT_PDELAY_REQ) + x->ptp_rx_msg_type_pdelay_req++; + else if (message_type == RDES_EXT_PDELAY_RESP) + x->ptp_rx_msg_type_pdelay_resp++; + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++; + + if (rdes1 & RDES1_PTP_PACKET_TYPE) + x->ptp_frame_type++; + if (rdes1 & RDES1_PTP_VER) + x->ptp_ver++; + if (rdes1 & RDES1_TIMESTAMP_DROPPED) + x->timestamp_dropped++; + + if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) { + 
x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) { + x->da_rx_filter_fail++; + ret = discard_frame; + } + + if (rdes2 & RDES2_L3_FILTER_MATCH) + x->l3_filter_match++; + if (rdes2 & RDES2_L4_FILTER_MATCH) + x->l4_filter_match++; + if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK) + >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT) + x->l3_l4_filter_no_match++; + + return ret; +} + +static int dwmac4_rd_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK); +} + +static int dwmac4_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT; +} + +static void dwmac4_set_tx_owner(struct dma_desc *p) +{ + p->des3 |= cpu_to_le32(TDES3_OWN); +} + +static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); + + if (!disable_rx_ic) + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); +} + +static int dwmac4_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR) + >> TDES3_LAST_DESCRIPTOR_SHIFT; +} + +static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe) +{ + return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK); +} + +static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE); +} + +static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) +{ + /* Context type from W/B descriptor must be zero */ + if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) + return 0; + + /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ + if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) + return 1; + + return 0; +} + +static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = le32_to_cpu(p->des0); + /* convert high/sec time stamp value to nanosecond */ + ns += le32_to_cpu(p->des1) * 1000000000ULL; + + *ts = ns; +} + +static int dwmac4_rx_check_timestamp(void *desc) +{ + struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes0 = le32_to_cpu(p->des0); + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes3 = le32_to_cpu(p->des3); + u32 own, ctxt; + int ret = 1; + + own = rdes3 & RDES3_OWN; + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) + >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); + + if (likely(!own && ctxt)) { + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) + /* Corrupted value */ + ret = -EINVAL; + else + /* A valid Timestamp is ready to be read */ + ret = 0; + } + + /* Timestamp not ready */ + return ret; +} + +static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + int ret = -EINVAL; + + /* Get the status from normal w/b descriptor */ + if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) { + if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) { + int i = 0; + + /* Check if timestamp is OK from context descriptor */ + do { + ret = dwmac4_rx_check_timestamp(next_desc); + if (ret < 0) + goto exit; + i++; + + } while ((ret == 1) && (i < 10)); + + if (i == 10) + ret = -EBUSY; + } + } +exit: + if (likely(ret == 0)) + return 1; + + return 0; +} + +static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end, int bfsize) +{ + dwmac4_set_rx_owner(p, disable_rx_ic); +} + +static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 
0; + p->des3 = 0; +} + +static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK); + + tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK; + if (is_fs) + tdes3 |= TDES3_FIRST_DESCRIPTOR; + else + tdes3 &= ~TDES3_FIRST_DESCRIPTOR; + + if (likely(csum_flag)) + tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT); + else + tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT); + + if (ls) + tdes3 |= TDES3_LAST_DESCRIPTOR; + else + tdes3 &= ~TDES3_LAST_DESCRIPTOR; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, + int len1, int len2, bool tx_own, + bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + if (len1) + p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK)); + + if (len2) + p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT) + & TDES2_BUFFER2_SIZE_MASK); + + if (is_fs) { + tdes3 |= TDES3_FIRST_DESCRIPTOR | + TDES3_TCP_SEGMENTATION_ENABLE | + ((tcphdrlen << TDES3_HDR_LEN_SHIFT) & + TDES3_SLOT_NUMBER_MASK) | + ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK)); + } else { + tdes3 &= ~TDES3_FIRST_DESCRIPTOR; + } + + if (ls) + tdes3 |= TDES3_LAST_DESCRIPTOR; + else + tdes3 &= ~TDES3_LAST_DESCRIPTOR; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwmac4_release_tx_desc(struct dma_desc *p, int mode) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwmac4_rd_set_tx_ic(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION); +} + +static void dwmac4_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) +{ + dma_addr_t dma_addr; + int i; + + pr_info("%s descriptor ring:\n", rx ? 
"RX" : "TX"); + + if (desc_size == sizeof(struct dma_desc)) { + struct dma_desc *p = (struct dma_desc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*p); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; + } + } else if (desc_size == sizeof(struct dma_extended_desc)) { + struct dma_extended_desc *extp = (struct dma_extended_desc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*extp); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1), + le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3), + le32_to_cpu(extp->des4), le32_to_cpu(extp->des5), + le32_to_cpu(extp->des6), le32_to_cpu(extp->des7)); + extp++; + } + } else if (desc_size == sizeof(struct dma_edesc)) { + struct dma_edesc *ep = (struct dma_edesc *)head; + + for (i = 0; i < size; i++) { + dma_addr = dma_rx_phy + i * sizeof(*ep); + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(ep->des4), le32_to_cpu(ep->des5), + le32_to_cpu(ep->des6), le32_to_cpu(ep->des7), + le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1), + le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3)); + ep++; + } + } else { + pr_err("unsupported descriptor!"); + } +} + +static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = cpu_to_le32(mss); + p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); +} + +static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des0 = cpu_to_le32(lower_32_bits(addr)); + p->des1 = cpu_to_le32(upper_32_bits(addr)); +} + +static void dwmac4_clear(struct dma_desc *p) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type) +{ + sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT; + + p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK); +} + +static int set_16kib_bfsize(int mtu) +{ + int ret = 0; + + if (unlikely(mtu >= BUF_SIZE_8KiB)) + ret = BUF_SIZE_16KiB; + return ret; +} + +static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + + /* Inner VLAN */ + if (inner_type) { + u32 des = inner_tag << TDES2_IVT_SHIFT; + + des &= TDES2_IVT_MASK; + p->des2 = cpu_to_le32(des); + + des = inner_type << TDES3_IVTIR_SHIFT; + des &= TDES3_IVTIR_MASK; + p->des3 = cpu_to_le32(des | TDES3_IVLTV); + } + + /* Outer VLAN */ + p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG); + p->des3 |= cpu_to_le32(TDES3_VLTV); + + p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE); +} + +static void dwmac4_set_vlan(struct dma_desc *p, u32 type) +{ + type <<= TDES2_VLAN_TAG_SHIFT; + p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK); +} + +static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len) +{ + *len = le32_to_cpu(p->des2) & RDES2_HL; +} + +static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid) +{ + p->des2 = cpu_to_le32(lower_32_bits(addr)); + p->des3 = cpu_to_le32(upper_32_bits(addr)); + + if (buf2_valid) + p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR); + else + p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR); +} + +static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec) +{ + p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV); + 
p->des5 = cpu_to_le32(nsec & TDES5_LT); + p->des6 = 0; + p->des7 = 0; +} + +const struct stmmac_desc_ops dwmac4_desc_ops = { + .tx_status = dwmac4_wrback_get_tx_status, + .rx_status = dwmac4_wrback_get_rx_status, + .get_tx_len = dwmac4_rd_get_tx_len, + .get_tx_owner = dwmac4_get_tx_owner, + .set_tx_owner = dwmac4_set_tx_owner, + .set_rx_owner = dwmac4_set_rx_owner, + .get_tx_ls = dwmac4_get_tx_ls, + .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, + .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, + .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, + .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, + .get_timestamp = dwmac4_get_timestamp, + .set_tx_ic = dwmac4_rd_set_tx_ic, + .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, + .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, + .release_tx_desc = dwmac4_release_tx_desc, + .init_rx_desc = dwmac4_rd_init_rx_desc, + .init_tx_desc = dwmac4_rd_init_tx_desc, + .display_ring = dwmac4_display_ring, + .set_mss = dwmac4_set_mss_ctxt, + .set_addr = dwmac4_set_addr, + .clear = dwmac4_clear, + .set_sarc = dwmac4_set_sarc, + .set_vlan_tag = dwmac4_set_vlan_tag, + .set_vlan = dwmac4_set_vlan, + .get_rx_header_len = dwmac4_get_rx_header_len, + .set_sec_addr = dwmac4_set_sec_addr, + .set_tbs = dwmac4_set_tbs, +}; + +const struct stmmac_mode_ops dwmac4_ring_mode_ops = { + .set_16kib_bfsize = set_16kib_bfsize, +}; diff --git a/devices/stmmac/dwmac4_descs-6.1-orig.h b/devices/stmmac/dwmac4_descs-6.1-orig.h new file mode 100644 index 00000000..6da070cc --- /dev/null +++ b/devices/stmmac/dwmac4_descs-6.1-orig.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Header File to describe the DMA descriptors and related definitions specific + * for DesignWare databook 4.xx. 
+ * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_DESCS_H__ +#define __DWMAC4_DESCS_H__ + +#include + +/* Normal transmit descriptor defines (without split feature) */ + +/* TDES2 (read format) */ +#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0) +#define TDES2_VLAN_TAG_MASK GENMASK(15, 14) +#define TDES2_VLAN_TAG_SHIFT 14 +#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16) +#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16 +#define TDES3_IVTIR_MASK GENMASK(19, 18) +#define TDES3_IVTIR_SHIFT 18 +#define TDES3_IVLTV BIT(17) +#define TDES2_TIMESTAMP_ENABLE BIT(30) +#define TDES2_IVT_MASK GENMASK(31, 16) +#define TDES2_IVT_SHIFT 16 +#define TDES2_INTERRUPT_ON_COMPLETION BIT(31) + +/* TDES3 (read format) */ +#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define TDES3_VLAN_TAG GENMASK(15, 0) +#define TDES3_VLTV BIT(16) +#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16) +#define TDES3_CHECKSUM_INSERTION_SHIFT 16 +#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0) +#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18) +#define TDES3_HDR_LEN_SHIFT 19 +#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19) +#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23) +#define TDES3_SA_INSERT_CTRL_SHIFT 23 +#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26) + +/* TDES3 (write back format) */ +#define TDES3_IP_HDR_ERROR BIT(0) +#define TDES3_DEFERRED BIT(1) +#define TDES3_UNDERFLOW_ERROR BIT(2) +#define TDES3_EXCESSIVE_DEFERRAL BIT(3) +#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4) +#define TDES3_COLLISION_COUNT_SHIFT 4 +#define TDES3_EXCESSIVE_COLLISION BIT(8) +#define TDES3_LATE_COLLISION BIT(9) +#define TDES3_NO_CARRIER BIT(10) +#define TDES3_LOSS_CARRIER BIT(11) +#define TDES3_PAYLOAD_ERROR BIT(12) +#define TDES3_PACKET_FLUSHED BIT(13) +#define TDES3_JABBER_TIMEOUT BIT(14) +#define TDES3_ERROR_SUMMARY BIT(15) +#define TDES3_TIMESTAMP_STATUS BIT(17) +#define TDES3_TIMESTAMP_STATUS_SHIFT 17 + +/* TDES3 context */ +#define TDES3_CTXT_TCMSSV BIT(26) + +/* TDES3 Common */ +#define TDES3_RS1V BIT(26) +#define TDES3_RS1V_SHIFT 26 +#define TDES3_LAST_DESCRIPTOR BIT(28) +#define TDES3_LAST_DESCRIPTOR_SHIFT 28 +#define TDES3_FIRST_DESCRIPTOR BIT(29) +#define TDES3_CONTEXT_TYPE BIT(30) +#define TDES3_CONTEXT_TYPE_SHIFT 30 + +/* TDES4 */ +#define TDES4_LTV BIT(31) +#define TDES4_LT GENMASK(7, 0) + +/* TDES5 */ +#define TDES5_LT GENMASK(31, 8) + +/* TDS3 use for both format (read and write back) */ +#define TDES3_OWN BIT(31) +#define TDES3_OWN_SHIFT 31 + +/* Normal receive descriptor defines (without split feature) */ + +/* RDES0 (write back format) */ +#define RDES0_VLAN_TAG_MASK GENMASK(15, 0) + +/* RDES1 (write back format) */ +#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0) +#define RDES1_IP_HDR_ERROR BIT(3) +#define RDES1_IPV4_HEADER BIT(4) +#define RDES1_IPV6_HEADER BIT(5) +#define RDES1_IP_CSUM_BYPASSED BIT(6) +#define RDES1_IP_CSUM_ERROR BIT(7) +#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8) +#define RDES1_PTP_PACKET_TYPE BIT(12) +#define RDES1_PTP_VER BIT(13) +#define RDES1_TIMESTAMP_AVAILABLE BIT(14) +#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14 +#define RDES1_TIMESTAMP_DROPPED BIT(15) +#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16) + +/* RDES2 (write back format) */ +#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0) +#define RDES2_VLAN_FILTER_STATUS BIT(15) +#define RDES2_SA_FILTER_FAIL BIT(16) +#define RDES2_DA_FILTER_FAIL BIT(17) +#define RDES2_HASH_FILTER_STATUS BIT(18) +#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19) +#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19) 
+#define RDES2_L3_FILTER_MATCH BIT(27) +#define RDES2_L4_FILTER_MATCH BIT(28) +#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26) +#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26 +#define RDES2_HL GENMASK(9, 0) + +/* RDES3 (write back format) */ +#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define RDES3_ERROR_SUMMARY BIT(15) +#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16) +#define RDES3_DRIBBLE_ERROR BIT(19) +#define RDES3_RECEIVE_ERROR BIT(20) +#define RDES3_OVERFLOW_ERROR BIT(21) +#define RDES3_RECEIVE_WATCHDOG BIT(22) +#define RDES3_GIANT_PACKET BIT(23) +#define RDES3_CRC_ERROR BIT(24) +#define RDES3_RDES0_VALID BIT(25) +#define RDES3_RDES1_VALID BIT(26) +#define RDES3_RDES2_VALID BIT(27) +#define RDES3_LAST_DESCRIPTOR BIT(28) +#define RDES3_FIRST_DESCRIPTOR BIT(29) +#define RDES3_CONTEXT_DESCRIPTOR BIT(30) +#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30 + +/* RDES3 (read format) */ +#define RDES3_BUFFER1_VALID_ADDR BIT(24) +#define RDES3_BUFFER2_VALID_ADDR BIT(25) +#define RDES3_INT_ON_COMPLETION_EN BIT(30) + +/* TDS3 use for both format (read and write back) */ +#define RDES3_OWN BIT(31) + +#endif /* __DWMAC4_DESCS_H__ */ diff --git a/devices/stmmac/dwmac4_dma-6.1-ethercat.c b/devices/stmmac/dwmac4_dma-6.1-ethercat.c new file mode 100644 index 00000000..81e4d631 --- /dev/null +++ b/devices/stmmac/dwmac4_dma-6.1-ethercat.c @@ -0,0 +1,577 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + * DWC Ether MAC version 4.xx has been used for developing this code. + * + * This contains the functions to handle the dma. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include "dwmac4-6.1-ethercat.h" +#include "dwmac4_dma-6.1-ethercat.h" + +static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); + int i; + + pr_info("dwmac4: Master AXI performs %s burst length\n", + (value & DMA_SYS_BUS_FB) ? "fixed" : "any"); + + if (axi->axi_lpi_en) + value |= DMA_AXI_EN_LPI; + if (axi->axi_xit_frm) + value |= DMA_AXI_LPI_XIT_FRM; + + value &= ~DMA_AXI_WR_OSR_LMT; + value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) << + DMA_AXI_WR_OSR_LMT_SHIFT; + + value &= ~DMA_AXI_RD_OSR_LMT; + value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) << + DMA_AXI_RD_OSR_LMT_SHIFT; + + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). 
+ */ + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= DMA_AXI_BLEN256; + break; + case 128: + value |= DMA_AXI_BLEN128; + break; + case 64: + value |= DMA_AXI_BLEN64; + break; + case 32: + value |= DMA_AXI_BLEN32; + break; + case 16: + value |= DMA_AXI_BLEN16; + break; + case 8: + value |= DMA_AXI_BLEN8; + break; + case 4: + value |= DMA_AXI_BLEN4; + break; + } + } + + writel(value, ioaddr + DMA_SYS_BUS_MODE); +} + +static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_rx_phy, u32 chan) +{ + u32 value; + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame)) + writel(upper_32_bits(dma_rx_phy), + ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan)); + + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan)); +} + +static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_tx_phy, u32 chan) +{ + u32 value; + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); + + /* Enable OSP to get best performance */ + value |= DMA_CONTROL_OSP; + + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame)) + writel(upper_32_bits(dma_tx_phy), + ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan)); + + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); +} + +static void dwmac4_dma_init_channel(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value; + + /* common channel control register config */ + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (dma_cfg->pblx8) + value = value | DMA_BUS_MODE_PBL; + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK, + ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +static void dwmac410_dma_init_channel(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value; + + /* common channel control register config */ + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (dma_cfg->pblx8) + value = value | DMA_BUS_MODE_PBL; + + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, + ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +static void dwmac4_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) + value |= DMA_SYS_BUS_FB; + + /* Mixed Burst has no effect when fb is set */ + if (dma_cfg->mixed_burst) + value |= DMA_SYS_BUS_MB; + + if (dma_cfg->aal) + value |= DMA_SYS_BUS_AAL; + + if (dma_cfg->eame) + value |= DMA_SYS_BUS_EAME; + + writel(value, ioaddr + DMA_SYS_BUS_MODE); + + value = readl(ioaddr + DMA_BUS_MODE); + + if (dma_cfg->multi_msi_en) { + value &= ~DMA_BUS_MODE_INTM_MASK; + value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT); + } + + if (dma_cfg->dche) + value |= DMA_BUS_MODE_DCHE; + + writel(value, ioaddr + DMA_BUS_MODE); + +} + +static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, + u32 *reg_space) +{ + reg_space[DMA_CHAN_CONTROL(channel) / 4] = + readl(ioaddr + 
DMA_CHAN_CONTROL(channel)); + reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); + reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); + reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)); + reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)); + reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)); + reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)); + reg_space[DMA_CHAN_INTR_ENA(channel) / 4] = + readl(ioaddr + DMA_CHAN_INTR_ENA(channel)); + reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)); + reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)); + reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)); + reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)); + reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_STATUS(channel)); +} + +static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) + _dwmac4_dump_dma_regs(ioaddr, i, reg_space); +} + +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) +{ + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); +} + +static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + unsigned int rqs = fifosz / 256 - 1; + u32 mtl_rx_op; + + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + mtl_rx_op |= MTL_OP_MODE_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); + mtl_rx_op &= ~MTL_OP_MODE_RSF; + mtl_rx_op &= MTL_OP_MODE_RTC_MASK; + if (mode <= 32) + mtl_rx_op |= MTL_OP_MODE_RTC_32; + else if (mode <= 64) + mtl_rx_op |= MTL_OP_MODE_RTC_64; + else if (mode <= 96) + mtl_rx_op |= MTL_OP_MODE_RTC_96; + else + mtl_rx_op |= MTL_OP_MODE_RTC_128; + } + + mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK; + mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT; + + /* Enable flow control only if each channel gets 4 KiB or more FIFO and + * only if channel is not an AVB channel. + */ + if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) { + unsigned int rfd, rfa; + + mtl_rx_op |= MTL_OP_MODE_EHFC; + + /* Set Threshold for Activating Flow Control to min 2 frames, + * i.e. 1500 * 2 = 3000 bytes. + * + * Set Threshold for Deactivating Flow Control to min 1 frame, + * i.e. 1500 bytes. + */ + switch (fifosz) { + case 4096: + /* This violates the above formula because of FIFO size + * limit therefore overflow may occur in spite of this. 
+ */ + rfd = 0x03; /* Full-2.5K */ + rfa = 0x01; /* Full-1.5K */ + break; + + default: + rfd = 0x07; /* Full-4.5K */ + rfa = 0x04; /* Full-3K */ + break; + } + + mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK; + mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT; + + mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK; + mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT; + } + + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); +} + +static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + unsigned int tqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. */ + mtl_tx_op |= MTL_OP_MODE_TSF; + } else { + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); + mtl_tx_op &= ~MTL_OP_MODE_TSF; + mtl_tx_op &= MTL_OP_MODE_TTC_MASK; + /* Set the transmit threshold */ + if (mode <= 32) + mtl_tx_op |= MTL_OP_MODE_TTC_32; + else if (mode <= 64) + mtl_tx_op |= MTL_OP_MODE_TTC_64; + else if (mode <= 96) + mtl_tx_op |= MTL_OP_MODE_TTC_96; + else if (mode <= 128) + mtl_tx_op |= MTL_OP_MODE_TTC_128; + else if (mode <= 192) + mtl_tx_op |= MTL_OP_MODE_TTC_192; + else if (mode <= 256) + mtl_tx_op |= MTL_OP_MODE_TTC_256; + else if (mode <= 384) + mtl_tx_op |= MTL_OP_MODE_TTC_384; + else + mtl_tx_op |= MTL_OP_MODE_TTC_512; + } + /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO + * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE. + * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W + * with reset values: TXQEN off, TQS 256 bytes. + * + * TXQEN must be written for multi-channel operation and TQS must + * reflect the available fifo size per queue (total fifo size / number + * of enabled queues). 
+ */ + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK; + mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + +static int dwmac4_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0); + + /* MAC HW feature0 */ + dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL); + dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1; + dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2; + dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4; + dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18; + dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3; + dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5; + dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6; + dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7; + /* MMC */ + dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8; + /* IEEE 1588-2008 */ + dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13; + /* TX and RX csum */ + dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14; + dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16; + dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27; + dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9; + + /* MAC HW feature1 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); + dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27; + dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24; + dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; + dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; + dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17; + + dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14; + switch (dma_cap->addr64) { + case 0: + dma_cap->addr64 = 32; + break; + case 1: + dma_cap->addr64 = 40; + break; + case 2: + dma_cap->addr64 = 48; + break; + default: + dma_cap->addr64 = 32; + break; + } + + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by + * shifting and store the sizes in bytes. 
+ */ + dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6); + dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0); + /* MAC HW feature2 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE2); + /* TX and RX number of channels */ + dma_cap->number_rx_channel = + ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1; + dma_cap->number_tx_channel = + ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1; + /* TX and RX number of queues */ + dma_cap->number_rx_queues = + ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; + dma_cap->number_tx_queues = + ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + /* PPS output */ + dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24; + + /* IEEE 1588-2002 */ + dma_cap->time_stamp = 0; + /* Number of Auxiliary Snapshot Inputs */ + dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28; + + /* MAC HW feature3 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE3); + + /* 5.10 Features */ + dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28; + dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27; + dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26; + dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20; + dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17; + dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16; + dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13; + dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11; + dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10; + dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5; + + return 0; +} + +/* Enable/disable TSO feature and set MSS */ +static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value; + + if (en) { + /* enable TSO */ + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + writel(value | DMA_CONTROL_TSE, + ioaddr + DMA_CHAN_TX_CONTROL(chan)); + } else { + /* enable TSO */ + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + writel(value & ~DMA_CONTROL_TSE, + ioaddr + DMA_CHAN_TX_CONTROL(chan)); + } +} + +static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + +static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_RBSZ_MASK; + value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); +} + +static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + GMAC_EXT_CONFIG); + + value &= ~GMAC_CONFIG_HDSMS; + value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */ + writel(value, ioaddr + GMAC_EXT_CONFIG); + + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (en) + value |= DMA_CONTROL_SPH; + else + value &= ~DMA_CONTROL_SPH; + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); +} + +static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + if (en) + value |= DMA_CONTROL_EDSE; + else + value &= ~DMA_CONTROL_EDSE; + + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE; + if (en && !value) + return -EIO; + + writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL); + return 0; +} + +const struct stmmac_dma_ops dwmac4_dma_ops = { + 
.reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, + .init_chan = dwmac4_dma_init_channel, + .init_rx_chan = dwmac4_dma_init_rx_chan, + .init_tx_chan = dwmac4_dma_init_tx_chan, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, + .enable_dma_irq = dwmac4_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, + .stop_tx = dwmac4_dma_stop_tx, + .start_rx = dwmac4_dma_start_rx, + .stop_rx = dwmac4_dma_stop_rx, + .dma_interrupt = dwmac4_dma_interrupt, + .get_hw_feature = dwmac4_get_hw_feature, + .rx_watchdog = dwmac4_rx_watchdog, + .set_rx_ring_len = dwmac4_set_rx_ring_len, + .set_tx_ring_len = dwmac4_set_tx_ring_len, + .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, + .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, + .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, + .set_bfsize = dwmac4_set_bfsize, + .enable_sph = dwmac4_enable_sph, +}; + +const struct stmmac_dma_ops dwmac410_dma_ops = { + .reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, + .init_chan = dwmac410_dma_init_channel, + .init_rx_chan = dwmac4_dma_init_rx_chan, + .init_tx_chan = dwmac4_dma_init_tx_chan, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, + .enable_dma_irq = dwmac410_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, + .stop_tx = dwmac4_dma_stop_tx, + .start_rx = dwmac4_dma_start_rx, + .stop_rx = dwmac4_dma_stop_rx, + .dma_interrupt = dwmac4_dma_interrupt, + .get_hw_feature = dwmac4_get_hw_feature, + .rx_watchdog = dwmac4_rx_watchdog, + .set_rx_ring_len = dwmac4_set_rx_ring_len, + .set_tx_ring_len = dwmac4_set_tx_ring_len, + .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, + .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, + .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, + .set_bfsize = dwmac4_set_bfsize, + .enable_sph = dwmac4_enable_sph, + .enable_tbs = dwmac4_enable_tbs, +}; diff --git a/devices/stmmac/dwmac4_dma-6.1-ethercat.h b/devices/stmmac/dwmac4_dma-6.1-ethercat.h new file mode 100644 index 00000000..9321879b --- /dev/null +++ b/devices/stmmac/dwmac4_dma-6.1-ethercat.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DWMAC4 DMA Header file. + * + * Copyright (C) 2007-2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_DMA_H__ +#define __DWMAC4_DMA_H__ + +/* Define the max channel number used for tx (also rx). 
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX + */ +#define DMA_CHANNEL_NB_MAX 1 + +#define DMA_BUS_MODE 0x00001000 +#define DMA_SYS_BUS_MODE 0x00001004 +#define DMA_STATUS 0x00001008 +#define DMA_DEBUG_STATUS_0 0x0000100c +#define DMA_DEBUG_STATUS_1 0x00001010 +#define DMA_DEBUG_STATUS_2 0x00001014 +#define DMA_AXI_BUS_MODE 0x00001028 +#define DMA_TBS_CTRL 0x00001050 + +/* DMA Bus Mode bitmap */ +#define DMA_BUS_MODE_DCHE BIT(19) +#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16) +#define DMA_BUS_MODE_INTM_SHIFT 16 +#define DMA_BUS_MODE_INTM_MODE1 0x1 +#define DMA_BUS_MODE_SFT_RESET BIT(0) + +/* DMA SYS Bus Mode bitmap */ +#define DMA_BUS_MODE_SPH BIT(24) +#define DMA_BUS_MODE_PBL BIT(16) +#define DMA_BUS_MODE_PBL_SHIFT 16 +#define DMA_BUS_MODE_RPBL_SHIFT 16 +#define DMA_BUS_MODE_MB BIT(14) +#define DMA_BUS_MODE_FB BIT(0) + +/* DMA Interrupt top status */ +#define DMA_STATUS_MAC BIT(17) +#define DMA_STATUS_MTL BIT(16) +#define DMA_STATUS_CHAN7 BIT(7) +#define DMA_STATUS_CHAN6 BIT(6) +#define DMA_STATUS_CHAN5 BIT(5) +#define DMA_STATUS_CHAN4 BIT(4) +#define DMA_STATUS_CHAN3 BIT(3) +#define DMA_STATUS_CHAN2 BIT(2) +#define DMA_STATUS_CHAN1 BIT(1) +#define DMA_STATUS_CHAN0 BIT(0) + +/* DMA debug status bitmap */ +#define DMA_DEBUG_STATUS_TS_MASK 0xf +#define DMA_DEBUG_STATUS_RS_MASK 0xf + +/* DMA AXI bitmap */ +#define DMA_AXI_EN_LPI BIT(31) +#define DMA_AXI_LPI_XIT_FRM BIT(30) +#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24) +#define DMA_AXI_WR_OSR_LMT_SHIFT 24 +#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16) +#define DMA_AXI_RD_OSR_LMT_SHIFT 16 + +#define DMA_AXI_OSR_MAX 0xf +#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ + (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) + +#define DMA_SYS_BUS_MB BIT(14) +#define DMA_AXI_1KBBE BIT(13) +#define DMA_SYS_BUS_AAL BIT(12) +#define DMA_SYS_BUS_EAME BIT(11) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_SYS_BUS_FB BIT(0) + +#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ + DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ + DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ + DMA_AXI_BLEN4) + +#define DMA_AXI_BURST_LEN_MASK 0x000000FE + +/* DMA TBS Control */ +#define DMA_TBS_FTOS GENMASK(31, 8) +#define DMA_TBS_FTOV BIT(0) +#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV) + +/* Following DMA defines are chanels oriented */ +#define DMA_CHAN_BASE_ADDR 0x00001100 +#define DMA_CHAN_BASE_OFFSET 0x80 +#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \ + (x * DMA_CHAN_BASE_OFFSET)) +#define DMA_CHAN_REG_NUMBER 17 + +#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x) +#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4) +#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8) +#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10) +#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14) +#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18) +#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c) +#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20) +#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28) +#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c) +#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30) +#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34) +#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) 
+ 0x38) +#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c) +#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44) +#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c) +#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54) +#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c) +#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60) + +/* DMA Control X */ +#define DMA_CONTROL_SPH BIT(24) +#define DMA_CONTROL_MSS_MASK GENMASK(13, 0) + +/* DMA Tx Channel X Control register defines */ +#define DMA_CONTROL_EDSE BIT(28) +#define DMA_CONTROL_TSE BIT(12) +#define DMA_CONTROL_OSP BIT(4) +#define DMA_CONTROL_ST BIT(0) + +/* DMA Rx Channel X Control register defines */ +#define DMA_CONTROL_SR BIT(0) +#define DMA_RBSZ_MASK GENMASK(14, 1) +#define DMA_RBSZ_SHIFT 1 + +/* Interrupt status per channel */ +#define DMA_CHAN_STATUS_REB GENMASK(21, 19) +#define DMA_CHAN_STATUS_REB_SHIFT 19 +#define DMA_CHAN_STATUS_TEB GENMASK(18, 16) +#define DMA_CHAN_STATUS_TEB_SHIFT 16 +#define DMA_CHAN_STATUS_NIS BIT(15) +#define DMA_CHAN_STATUS_AIS BIT(14) +#define DMA_CHAN_STATUS_CDE BIT(13) +#define DMA_CHAN_STATUS_FBE BIT(12) +#define DMA_CHAN_STATUS_ERI BIT(11) +#define DMA_CHAN_STATUS_ETI BIT(10) +#define DMA_CHAN_STATUS_RWT BIT(9) +#define DMA_CHAN_STATUS_RPS BIT(8) +#define DMA_CHAN_STATUS_RBU BIT(7) +#define DMA_CHAN_STATUS_RI BIT(6) +#define DMA_CHAN_STATUS_TBU BIT(2) +#define DMA_CHAN_STATUS_TPS BIT(1) +#define DMA_CHAN_STATUS_TI BIT(0) + +#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \ + DMA_CHAN_STATUS_AIS | \ + DMA_CHAN_STATUS_CDE | \ + DMA_CHAN_STATUS_FBE) + +#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \ + DMA_CHAN_STATUS_ERI | \ + DMA_CHAN_STATUS_RWT | \ + DMA_CHAN_STATUS_RPS | \ + DMA_CHAN_STATUS_RBU | \ + DMA_CHAN_STATUS_RI | \ + DMA_CHAN_STATUS_MSK_COMMON) + +#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \ + DMA_CHAN_STATUS_TBU | \ + DMA_CHAN_STATUS_TPS | \ + DMA_CHAN_STATUS_TI | \ + DMA_CHAN_STATUS_MSK_COMMON) + +/* Interrupt enable bits per channel */ +#define DMA_CHAN_INTR_ENA_NIE BIT(16) +#define DMA_CHAN_INTR_ENA_AIE BIT(15) +#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15) +#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14) +#define DMA_CHAN_INTR_ENA_CDE BIT(13) +#define DMA_CHAN_INTR_ENA_FBE BIT(12) +#define DMA_CHAN_INTR_ENA_ERE BIT(11) +#define DMA_CHAN_INTR_ENA_ETE BIT(10) +#define DMA_CHAN_INTR_ENA_RWE BIT(9) +#define DMA_CHAN_INTR_ENA_RSE BIT(8) +#define DMA_CHAN_INTR_ENA_RBUE BIT(7) +#define DMA_CHAN_INTR_ENA_RIE BIT(6) +#define DMA_CHAN_INTR_ENA_TBUE BIT(2) +#define DMA_CHAN_INTR_ENA_TSE BIT(1) +#define DMA_CHAN_INTR_ENA_TIE BIT(0) + +#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.00 */ +#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \ + DMA_CHAN_INTR_ABNORMAL) +#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE) +#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.10a */ +#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \ + DMA_CHAN_INTR_ABNORMAL_4_10) +#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE) +#define 
DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE) + +/* channel 0 specific fields */ +#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12) +#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12 +#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8) +#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8 + +int dwmac4_dma_reset(void __iomem *ioaddr); +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, u32 dir); +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + +#endif /* __DWMAC4_DMA_H__ */ diff --git a/devices/stmmac/dwmac4_dma-6.1-orig.c b/devices/stmmac/dwmac4_dma-6.1-orig.c new file mode 100644 index 00000000..d99fa028 --- /dev/null +++ b/devices/stmmac/dwmac4_dma-6.1-orig.c @@ -0,0 +1,577 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + * DWC Ether MAC version 4.xx has been used for developing this code. + * + * This contains the functions to handle the dma. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include "dwmac4.h" +#include "dwmac4_dma.h" + +static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); + int i; + + pr_info("dwmac4: Master AXI performs %s burst length\n", + (value & DMA_SYS_BUS_FB) ? "fixed" : "any"); + + if (axi->axi_lpi_en) + value |= DMA_AXI_EN_LPI; + if (axi->axi_xit_frm) + value |= DMA_AXI_LPI_XIT_FRM; + + value &= ~DMA_AXI_WR_OSR_LMT; + value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) << + DMA_AXI_WR_OSR_LMT_SHIFT; + + value &= ~DMA_AXI_RD_OSR_LMT; + value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) << + DMA_AXI_RD_OSR_LMT_SHIFT; + + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). 
+ */ + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= DMA_AXI_BLEN256; + break; + case 128: + value |= DMA_AXI_BLEN128; + break; + case 64: + value |= DMA_AXI_BLEN64; + break; + case 32: + value |= DMA_AXI_BLEN32; + break; + case 16: + value |= DMA_AXI_BLEN16; + break; + case 8: + value |= DMA_AXI_BLEN8; + break; + case 4: + value |= DMA_AXI_BLEN4; + break; + } + } + + writel(value, ioaddr + DMA_SYS_BUS_MODE); +} + +static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_rx_phy, u32 chan) +{ + u32 value; + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame)) + writel(upper_32_bits(dma_rx_phy), + ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan)); + + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan)); +} + +static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t dma_tx_phy, u32 chan) +{ + u32 value; + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); + + /* Enable OSP to get best performance */ + value |= DMA_CONTROL_OSP; + + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame)) + writel(upper_32_bits(dma_tx_phy), + ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan)); + + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); +} + +static void dwmac4_dma_init_channel(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value; + + /* common channel control register config */ + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (dma_cfg->pblx8) + value = value | DMA_BUS_MODE_PBL; + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK, + ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +static void dwmac410_dma_init_channel(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value; + + /* common channel control register config */ + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (dma_cfg->pblx8) + value = value | DMA_BUS_MODE_PBL; + + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, + ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +static void dwmac4_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) + value |= DMA_SYS_BUS_FB; + + /* Mixed Burst has no effect when fb is set */ + if (dma_cfg->mixed_burst) + value |= DMA_SYS_BUS_MB; + + if (dma_cfg->aal) + value |= DMA_SYS_BUS_AAL; + + if (dma_cfg->eame) + value |= DMA_SYS_BUS_EAME; + + writel(value, ioaddr + DMA_SYS_BUS_MODE); + + value = readl(ioaddr + DMA_BUS_MODE); + + if (dma_cfg->multi_msi_en) { + value &= ~DMA_BUS_MODE_INTM_MASK; + value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT); + } + + if (dma_cfg->dche) + value |= DMA_BUS_MODE_DCHE; + + writel(value, ioaddr + DMA_BUS_MODE); + +} + +static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, + u32 *reg_space) +{ + reg_space[DMA_CHAN_CONTROL(channel) / 4] = + readl(ioaddr + 
DMA_CHAN_CONTROL(channel)); + reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); + reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); + reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); + reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)); + reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)); + reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)); + reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)); + reg_space[DMA_CHAN_INTR_ENA(channel) / 4] = + readl(ioaddr + DMA_CHAN_INTR_ENA(channel)); + reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)); + reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)); + reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)); + reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)); + reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)); + reg_space[DMA_CHAN_STATUS(channel) / 4] = + readl(ioaddr + DMA_CHAN_STATUS(channel)); +} + +static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) + _dwmac4_dump_dma_regs(ioaddr, i, reg_space); +} + +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) +{ + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); +} + +static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + unsigned int rqs = fifosz / 256 - 1; + u32 mtl_rx_op; + + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + mtl_rx_op |= MTL_OP_MODE_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); + mtl_rx_op &= ~MTL_OP_MODE_RSF; + mtl_rx_op &= MTL_OP_MODE_RTC_MASK; + if (mode <= 32) + mtl_rx_op |= MTL_OP_MODE_RTC_32; + else if (mode <= 64) + mtl_rx_op |= MTL_OP_MODE_RTC_64; + else if (mode <= 96) + mtl_rx_op |= MTL_OP_MODE_RTC_96; + else + mtl_rx_op |= MTL_OP_MODE_RTC_128; + } + + mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK; + mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT; + + /* Enable flow control only if each channel gets 4 KiB or more FIFO and + * only if channel is not an AVB channel. + */ + if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) { + unsigned int rfd, rfa; + + mtl_rx_op |= MTL_OP_MODE_EHFC; + + /* Set Threshold for Activating Flow Control to min 2 frames, + * i.e. 1500 * 2 = 3000 bytes. + * + * Set Threshold for Deactivating Flow Control to min 1 frame, + * i.e. 1500 bytes. + */ + switch (fifosz) { + case 4096: + /* This violates the above formula because of FIFO size + * limit therefore overflow may occur in spite of this. 
+ */ + rfd = 0x03; /* Full-2.5K */ + rfa = 0x01; /* Full-1.5K */ + break; + + default: + rfd = 0x07; /* Full-4.5K */ + rfa = 0x04; /* Full-3K */ + break; + } + + mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK; + mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT; + + mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK; + mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT; + } + + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); +} + +static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + unsigned int tqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. */ + mtl_tx_op |= MTL_OP_MODE_TSF; + } else { + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); + mtl_tx_op &= ~MTL_OP_MODE_TSF; + mtl_tx_op &= MTL_OP_MODE_TTC_MASK; + /* Set the transmit threshold */ + if (mode <= 32) + mtl_tx_op |= MTL_OP_MODE_TTC_32; + else if (mode <= 64) + mtl_tx_op |= MTL_OP_MODE_TTC_64; + else if (mode <= 96) + mtl_tx_op |= MTL_OP_MODE_TTC_96; + else if (mode <= 128) + mtl_tx_op |= MTL_OP_MODE_TTC_128; + else if (mode <= 192) + mtl_tx_op |= MTL_OP_MODE_TTC_192; + else if (mode <= 256) + mtl_tx_op |= MTL_OP_MODE_TTC_256; + else if (mode <= 384) + mtl_tx_op |= MTL_OP_MODE_TTC_384; + else + mtl_tx_op |= MTL_OP_MODE_TTC_512; + } + /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO + * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE. + * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W + * with reset values: TXQEN off, TQS 256 bytes. + * + * TXQEN must be written for multi-channel operation and TQS must + * reflect the available fifo size per queue (total fifo size / number + * of enabled queues). 
+ */ + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK; + mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + +static int dwmac4_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0); + + /* MAC HW feature0 */ + dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL); + dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1; + dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2; + dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4; + dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18; + dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3; + dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5; + dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6; + dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7; + /* MMC */ + dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8; + /* IEEE 1588-2008 */ + dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13; + /* TX and RX csum */ + dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14; + dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16; + dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27; + dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9; + + /* MAC HW feature1 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); + dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27; + dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24; + dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; + dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; + dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17; + + dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14; + switch (dma_cap->addr64) { + case 0: + dma_cap->addr64 = 32; + break; + case 1: + dma_cap->addr64 = 40; + break; + case 2: + dma_cap->addr64 = 48; + break; + default: + dma_cap->addr64 = 32; + break; + } + + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by + * shifting and store the sizes in bytes. 
+ */ + dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6); + dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0); + /* MAC HW feature2 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE2); + /* TX and RX number of channels */ + dma_cap->number_rx_channel = + ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1; + dma_cap->number_tx_channel = + ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1; + /* TX and RX number of queues */ + dma_cap->number_rx_queues = + ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; + dma_cap->number_tx_queues = + ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + /* PPS output */ + dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24; + + /* IEEE 1588-2002 */ + dma_cap->time_stamp = 0; + /* Number of Auxiliary Snapshot Inputs */ + dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28; + + /* MAC HW feature3 */ + hw_cap = readl(ioaddr + GMAC_HW_FEATURE3); + + /* 5.10 Features */ + dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28; + dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27; + dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26; + dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20; + dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17; + dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16; + dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13; + dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11; + dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10; + dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5; + + return 0; +} + +/* Enable/disable TSO feature and set MSS */ +static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value; + + if (en) { + /* enable TSO */ + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + writel(value | DMA_CONTROL_TSE, + ioaddr + DMA_CHAN_TX_CONTROL(chan)); + } else { + /* enable TSO */ + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + writel(value & ~DMA_CONTROL_TSE, + ioaddr + DMA_CHAN_TX_CONTROL(chan)); + } +} + +static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + +static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_RBSZ_MASK; + value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); +} + +static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + GMAC_EXT_CONFIG); + + value &= ~GMAC_CONFIG_HDSMS; + value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */ + writel(value, ioaddr + GMAC_EXT_CONFIG); + + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (en) + value |= DMA_CONTROL_SPH; + else + value &= ~DMA_CONTROL_SPH; + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); +} + +static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + if (en) + value |= DMA_CONTROL_EDSE; + else + value &= ~DMA_CONTROL_EDSE; + + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE; + if (en && !value) + return -EIO; + + writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL); + return 0; +} + +const struct stmmac_dma_ops dwmac4_dma_ops = { + 
.reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, + .init_chan = dwmac4_dma_init_channel, + .init_rx_chan = dwmac4_dma_init_rx_chan, + .init_tx_chan = dwmac4_dma_init_tx_chan, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, + .enable_dma_irq = dwmac4_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, + .stop_tx = dwmac4_dma_stop_tx, + .start_rx = dwmac4_dma_start_rx, + .stop_rx = dwmac4_dma_stop_rx, + .dma_interrupt = dwmac4_dma_interrupt, + .get_hw_feature = dwmac4_get_hw_feature, + .rx_watchdog = dwmac4_rx_watchdog, + .set_rx_ring_len = dwmac4_set_rx_ring_len, + .set_tx_ring_len = dwmac4_set_tx_ring_len, + .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, + .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, + .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, + .set_bfsize = dwmac4_set_bfsize, + .enable_sph = dwmac4_enable_sph, +}; + +const struct stmmac_dma_ops dwmac410_dma_ops = { + .reset = dwmac4_dma_reset, + .init = dwmac4_dma_init, + .init_chan = dwmac410_dma_init_channel, + .init_rx_chan = dwmac4_dma_init_rx_chan, + .init_tx_chan = dwmac4_dma_init_tx_chan, + .axi = dwmac4_dma_axi, + .dump_regs = dwmac4_dump_dma_regs, + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, + .enable_dma_irq = dwmac410_enable_dma_irq, + .disable_dma_irq = dwmac4_disable_dma_irq, + .start_tx = dwmac4_dma_start_tx, + .stop_tx = dwmac4_dma_stop_tx, + .start_rx = dwmac4_dma_start_rx, + .stop_rx = dwmac4_dma_stop_rx, + .dma_interrupt = dwmac4_dma_interrupt, + .get_hw_feature = dwmac4_get_hw_feature, + .rx_watchdog = dwmac4_rx_watchdog, + .set_rx_ring_len = dwmac4_set_rx_ring_len, + .set_tx_ring_len = dwmac4_set_tx_ring_len, + .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, + .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, + .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, + .set_bfsize = dwmac4_set_bfsize, + .enable_sph = dwmac4_enable_sph, + .enable_tbs = dwmac4_enable_tbs, +}; diff --git a/devices/stmmac/dwmac4_dma-6.1-orig.h b/devices/stmmac/dwmac4_dma-6.1-orig.h new file mode 100644 index 00000000..9321879b --- /dev/null +++ b/devices/stmmac/dwmac4_dma-6.1-orig.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DWMAC4 DMA Header file. + * + * Copyright (C) 2007-2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#ifndef __DWMAC4_DMA_H__ +#define __DWMAC4_DMA_H__ + +/* Define the max channel number used for tx (also rx). 
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX + */ +#define DMA_CHANNEL_NB_MAX 1 + +#define DMA_BUS_MODE 0x00001000 +#define DMA_SYS_BUS_MODE 0x00001004 +#define DMA_STATUS 0x00001008 +#define DMA_DEBUG_STATUS_0 0x0000100c +#define DMA_DEBUG_STATUS_1 0x00001010 +#define DMA_DEBUG_STATUS_2 0x00001014 +#define DMA_AXI_BUS_MODE 0x00001028 +#define DMA_TBS_CTRL 0x00001050 + +/* DMA Bus Mode bitmap */ +#define DMA_BUS_MODE_DCHE BIT(19) +#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16) +#define DMA_BUS_MODE_INTM_SHIFT 16 +#define DMA_BUS_MODE_INTM_MODE1 0x1 +#define DMA_BUS_MODE_SFT_RESET BIT(0) + +/* DMA SYS Bus Mode bitmap */ +#define DMA_BUS_MODE_SPH BIT(24) +#define DMA_BUS_MODE_PBL BIT(16) +#define DMA_BUS_MODE_PBL_SHIFT 16 +#define DMA_BUS_MODE_RPBL_SHIFT 16 +#define DMA_BUS_MODE_MB BIT(14) +#define DMA_BUS_MODE_FB BIT(0) + +/* DMA Interrupt top status */ +#define DMA_STATUS_MAC BIT(17) +#define DMA_STATUS_MTL BIT(16) +#define DMA_STATUS_CHAN7 BIT(7) +#define DMA_STATUS_CHAN6 BIT(6) +#define DMA_STATUS_CHAN5 BIT(5) +#define DMA_STATUS_CHAN4 BIT(4) +#define DMA_STATUS_CHAN3 BIT(3) +#define DMA_STATUS_CHAN2 BIT(2) +#define DMA_STATUS_CHAN1 BIT(1) +#define DMA_STATUS_CHAN0 BIT(0) + +/* DMA debug status bitmap */ +#define DMA_DEBUG_STATUS_TS_MASK 0xf +#define DMA_DEBUG_STATUS_RS_MASK 0xf + +/* DMA AXI bitmap */ +#define DMA_AXI_EN_LPI BIT(31) +#define DMA_AXI_LPI_XIT_FRM BIT(30) +#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24) +#define DMA_AXI_WR_OSR_LMT_SHIFT 24 +#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16) +#define DMA_AXI_RD_OSR_LMT_SHIFT 16 + +#define DMA_AXI_OSR_MAX 0xf +#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ + (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) + +#define DMA_SYS_BUS_MB BIT(14) +#define DMA_AXI_1KBBE BIT(13) +#define DMA_SYS_BUS_AAL BIT(12) +#define DMA_SYS_BUS_EAME BIT(11) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_SYS_BUS_FB BIT(0) + +#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ + DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ + DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ + DMA_AXI_BLEN4) + +#define DMA_AXI_BURST_LEN_MASK 0x000000FE + +/* DMA TBS Control */ +#define DMA_TBS_FTOS GENMASK(31, 8) +#define DMA_TBS_FTOV BIT(0) +#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV) + +/* Following DMA defines are chanels oriented */ +#define DMA_CHAN_BASE_ADDR 0x00001100 +#define DMA_CHAN_BASE_OFFSET 0x80 +#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \ + (x * DMA_CHAN_BASE_OFFSET)) +#define DMA_CHAN_REG_NUMBER 17 + +#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x) +#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4) +#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8) +#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10) +#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14) +#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18) +#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c) +#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20) +#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28) +#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c) +#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30) +#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34) +#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) 
+ 0x38) +#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c) +#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44) +#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c) +#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54) +#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c) +#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60) + +/* DMA Control X */ +#define DMA_CONTROL_SPH BIT(24) +#define DMA_CONTROL_MSS_MASK GENMASK(13, 0) + +/* DMA Tx Channel X Control register defines */ +#define DMA_CONTROL_EDSE BIT(28) +#define DMA_CONTROL_TSE BIT(12) +#define DMA_CONTROL_OSP BIT(4) +#define DMA_CONTROL_ST BIT(0) + +/* DMA Rx Channel X Control register defines */ +#define DMA_CONTROL_SR BIT(0) +#define DMA_RBSZ_MASK GENMASK(14, 1) +#define DMA_RBSZ_SHIFT 1 + +/* Interrupt status per channel */ +#define DMA_CHAN_STATUS_REB GENMASK(21, 19) +#define DMA_CHAN_STATUS_REB_SHIFT 19 +#define DMA_CHAN_STATUS_TEB GENMASK(18, 16) +#define DMA_CHAN_STATUS_TEB_SHIFT 16 +#define DMA_CHAN_STATUS_NIS BIT(15) +#define DMA_CHAN_STATUS_AIS BIT(14) +#define DMA_CHAN_STATUS_CDE BIT(13) +#define DMA_CHAN_STATUS_FBE BIT(12) +#define DMA_CHAN_STATUS_ERI BIT(11) +#define DMA_CHAN_STATUS_ETI BIT(10) +#define DMA_CHAN_STATUS_RWT BIT(9) +#define DMA_CHAN_STATUS_RPS BIT(8) +#define DMA_CHAN_STATUS_RBU BIT(7) +#define DMA_CHAN_STATUS_RI BIT(6) +#define DMA_CHAN_STATUS_TBU BIT(2) +#define DMA_CHAN_STATUS_TPS BIT(1) +#define DMA_CHAN_STATUS_TI BIT(0) + +#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \ + DMA_CHAN_STATUS_AIS | \ + DMA_CHAN_STATUS_CDE | \ + DMA_CHAN_STATUS_FBE) + +#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \ + DMA_CHAN_STATUS_ERI | \ + DMA_CHAN_STATUS_RWT | \ + DMA_CHAN_STATUS_RPS | \ + DMA_CHAN_STATUS_RBU | \ + DMA_CHAN_STATUS_RI | \ + DMA_CHAN_STATUS_MSK_COMMON) + +#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \ + DMA_CHAN_STATUS_TBU | \ + DMA_CHAN_STATUS_TPS | \ + DMA_CHAN_STATUS_TI | \ + DMA_CHAN_STATUS_MSK_COMMON) + +/* Interrupt enable bits per channel */ +#define DMA_CHAN_INTR_ENA_NIE BIT(16) +#define DMA_CHAN_INTR_ENA_AIE BIT(15) +#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15) +#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14) +#define DMA_CHAN_INTR_ENA_CDE BIT(13) +#define DMA_CHAN_INTR_ENA_FBE BIT(12) +#define DMA_CHAN_INTR_ENA_ERE BIT(11) +#define DMA_CHAN_INTR_ENA_ETE BIT(10) +#define DMA_CHAN_INTR_ENA_RWE BIT(9) +#define DMA_CHAN_INTR_ENA_RSE BIT(8) +#define DMA_CHAN_INTR_ENA_RBUE BIT(7) +#define DMA_CHAN_INTR_ENA_RIE BIT(6) +#define DMA_CHAN_INTR_ENA_TBUE BIT(2) +#define DMA_CHAN_INTR_ENA_TSE BIT(1) +#define DMA_CHAN_INTR_ENA_TIE BIT(0) + +#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.00 */ +#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \ + DMA_CHAN_INTR_ABNORMAL) +#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE) +#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.10a */ +#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \ + DMA_CHAN_INTR_ABNORMAL_4_10) +#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE) +#define 
DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE) + +/* channel 0 specific fields */ +#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12) +#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12 +#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8) +#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8 + +int dwmac4_dma_reset(void __iomem *ioaddr); +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, u32 dir); +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + +#endif /* __DWMAC4_DMA_H__ */ diff --git a/devices/stmmac/dwmac4_lib-6.1-ethercat.c b/devices/stmmac/dwmac4_lib-6.1-ethercat.c new file mode 100644 index 00000000..5fecb611 --- /dev/null +++ b/devices/stmmac/dwmac4_lib-6.1-ethercat.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2007-2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include +#include +#include "common-6.1-ethercat.h" +#include "dwmac4_dma-6.1-ethercat.h" +#include "dwmac4-6.1-ethercat.h" + +int dwmac4_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 10000, 1000000); +} + +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan)); +} + +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan)); +} + +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); +} + +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value |= DMA_CONTROL_SR; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); +} + +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan)); +} + +void 
dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan)); +} + +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + + if (rx) + value |= DMA_CHAN_INTR_DEFAULT_RX; + if (tx) + value |= DMA_CHAN_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + + if (rx) + value |= DMA_CHAN_INTR_DEFAULT_RX_4_10; + if (tx) + value |= DMA_CHAN_INTR_DEFAULT_TX_4_10; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + + if (rx) + value &= ~DMA_CHAN_INTR_DEFAULT_RX; + if (tx) + value &= ~DMA_CHAN_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + + if (rx) + value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10; + if (tx) + value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, u32 dir) +{ + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + int ret = 0; + + if (dir == DMA_DIR_RX) + intr_status &= DMA_CHAN_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_CHAN_STATUS_MSK_TX; + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { + if (unlikely(intr_status & DMA_CHAN_STATUS_RBU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & DMA_CHAN_STATUS_NIS)) + x->normal_irq_n++; + if (likely(intr_status & DMA_CHAN_STATUS_RI)) { + x->rx_normal_irq_n++; + x->rxq_stats[chan].rx_normal_irq_n++; + ret |= handle_rx; + } + if (likely(intr_status & DMA_CHAN_STATUS_TI)) { + x->tx_normal_irq_n++; + x->txq_stats[chan].tx_normal_irq_n++; + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_CHAN_STATUS_TBU)) + ret |= handle_tx; + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) + x->rx_early_irq++; + + writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan)); + return ret; +} + +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + /* For MAC Addr registers se have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. 
+ */ + data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT); + writel(data | GMAC_HI_REG_AE, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); +} + +/* Enable disable MAC RX/TX */ +void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + u32 old_val = value; + + if (enable) + value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE; + else + value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE); + + if (value != old_val) + writel(value, ioaddr + GMAC_CONFIG); +} + +void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} diff --git a/devices/stmmac/dwmac4_lib-6.1-orig.c b/devices/stmmac/dwmac4_lib-6.1-orig.c new file mode 100644 index 00000000..7c26394f --- /dev/null +++ b/devices/stmmac/dwmac4_lib-6.1-orig.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2007-2015 STMicroelectronics Ltd + * + * Author: Alexandre Torgue + */ + +#include +#include +#include +#include "common.h" +#include "dwmac4_dma.h" +#include "dwmac4.h" + +int dwmac4_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 10000, 1000000); +} + +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan)); +} + +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan)); +} + +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); + + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); +} + +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value |= DMA_CONTROL_SR; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); +} + +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan)); +} + +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan)); +} + +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + 
+ if (rx) + value |= DMA_CHAN_INTR_DEFAULT_RX; + if (tx) + value |= DMA_CHAN_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + + if (rx) + value |= DMA_CHAN_INTR_DEFAULT_RX_4_10; + if (tx) + value |= DMA_CHAN_INTR_DEFAULT_TX_4_10; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + + if (rx) + value &= ~DMA_CHAN_INTR_DEFAULT_RX; + if (tx) + value &= ~DMA_CHAN_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + + if (rx) + value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10; + if (tx) + value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10; + + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); +} + +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, u32 dir) +{ + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + int ret = 0; + + if (dir == DMA_DIR_RX) + intr_status &= DMA_CHAN_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_CHAN_STATUS_MSK_TX; + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { + if (unlikely(intr_status & DMA_CHAN_STATUS_RBU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & DMA_CHAN_STATUS_NIS)) + x->normal_irq_n++; + if (likely(intr_status & DMA_CHAN_STATUS_RI)) { + x->rx_normal_irq_n++; + x->rxq_stats[chan].rx_normal_irq_n++; + ret |= handle_rx; + } + if (likely(intr_status & DMA_CHAN_STATUS_TI)) { + x->tx_normal_irq_n++; + x->txq_stats[chan].tx_normal_irq_n++; + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_CHAN_STATUS_TBU)) + ret |= handle_tx; + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) + x->rx_early_irq++; + + writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan)); + return ret; +} + +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + /* For MAC Addr registers se have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. 
+ */ + data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT); + writel(data | GMAC_HI_REG_AE, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); +} + +/* Enable disable MAC RX/TX */ +void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + GMAC_CONFIG); + u32 old_val = value; + + if (enable) + value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE; + else + value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE); + + if (value != old_val) + writel(value, ioaddr + GMAC_CONFIG); +} + +void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} diff --git a/devices/stmmac/dwmac5-6.1-ethercat.c b/devices/stmmac/dwmac5-6.1-ethercat.c new file mode 100644 index 00000000..98aaf49a --- /dev/null +++ b/devices/stmmac/dwmac5-6.1-ethercat.c @@ -0,0 +1,777 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates. +// stmmac Support for 5.xx Ethernet QoS cores + +#include +#include +#include "common-6.1-ethercat.h" +#include "dwmac4-6.1-ethercat.h" +#include "dwmac5-6.1-ethercat.h" +#include "stmmac-6.1-ethercat.h" +#include "stmmac_ptp-6.1-ethercat.h" + +struct dwmac5_error_desc { + bool valid; + const char *desc; + const char *detailed_desc; +}; + +#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field) + +static void dwmac5_log_error(struct net_device *ndev, u32 value, bool corr, + const char *module_name, const struct dwmac5_error_desc *desc, + unsigned long field_offset, struct stmmac_safety_stats *stats) +{ + unsigned long loc, mask; + u8 *bptr = (u8 *)stats; + unsigned long *ptr; + + ptr = (unsigned long *)(bptr + field_offset); + + mask = value; + for_each_set_bit(loc, &mask, 32) { + netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ? 
+ "correctable" : "uncorrectable", module_name, + desc[loc].desc, desc[loc].detailed_desc); + + /* Update counters */ + ptr[loc]++; + } +} + +static const struct dwmac5_error_desc dwmac5_mac_errors[32]= { + { true, "ATPES", "Application Transmit Interface Parity Check Error" }, + { true, "TPES", "TSO Data Path Parity Check Error" }, + { true, "RDPES", "Read Descriptor Parity Check Error" }, + { true, "MPES", "MTL Data Path Parity Check Error" }, + { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" }, + { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" }, + { true, "CWPES", "CSR Write Data Path Parity Check Error" }, + { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" }, + { true, "TTES", "TX FSM Timeout Error" }, + { true, "RTES", "RX FSM Timeout Error" }, + { true, "CTES", "CSR FSM Timeout Error" }, + { true, "ATES", "APP FSM Timeout Error" }, + { true, "PTES", "PTP FSM Timeout Error" }, + { true, "T125ES", "TX125 FSM Timeout Error" }, + { true, "R125ES", "RX125 FSM Timeout Error" }, + { true, "RVCTES", "REV MDC FSM Timeout Error" }, + { true, "MSTTES", "Master Read/Write Timeout Error" }, + { true, "SLVTES", "Slave Read/Write Timeout Error" }, + { true, "ATITES", "Application Timeout on ATI Interface Error" }, + { true, "ARITES", "Application Timeout on ARI Interface Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { true, "FSMPES", "FSM State Parity Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_mac_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + MAC_DPP_FSM_INT_STATUS); + writel(value, ioaddr + MAC_DPP_FSM_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "MAC", dwmac5_mac_errors, + STAT_OFF(mac_errors), stats); +} + +static const struct dwmac5_error_desc dwmac5_mtl_errors[32]= { + { true, "TXCES", "MTL TX Memory Error" }, + { true, "TXAMS", "MTL TX Memory Address Mismatch Error" }, + { true, "TXUES", "MTL TX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "RXCES", "MTL RX Memory Error" }, + { true, "RXAMS", "MTL RX Memory Address Mismatch Error" }, + { true, "RXUES", "MTL RX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { true, "ECES", "MTL EST Memory Error" }, + { true, "EAMS", "MTL EST Memory Address Mismatch Error" }, + { true, "EUES", "MTL EST Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { true, "RPCES", "MTL RX Parser Memory Error" }, + { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" }, + { true, "RPUES", "MTL RX Parser Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ 
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_mtl_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + MTL_ECC_INT_STATUS); + writel(value, ioaddr + MTL_ECC_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "MTL", dwmac5_mtl_errors, + STAT_OFF(mtl_errors), stats); +} + +static const struct dwmac5_error_desc dwmac5_dma_errors[32]= { + { true, "TCES", "DMA TSO Memory Error" }, + { true, "TAMS", "DMA TSO Memory Address Mismatch Error" }, + { true, "TUES", "DMA TSO Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { false, "UNKNOWN", "Unknown Error" }, /* 4 */ + { false, "UNKNOWN", "Unknown Error" }, /* 5 */ + { false, "UNKNOWN", "Unknown Error" }, /* 6 */ + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { false, "UNKNOWN", "Unknown Error" }, /* 8 */ + { false, "UNKNOWN", "Unknown Error" }, /* 9 */ + { false, "UNKNOWN", "Unknown Error" }, /* 10 */ + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { false, "UNKNOWN", "Unknown Error" }, /* 12 */ + { false, "UNKNOWN", "Unknown Error" }, /* 13 */ + { false, "UNKNOWN", "Unknown Error" }, /* 14 */ + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_dma_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + DMA_ECC_INT_STATUS); + writel(value, ioaddr + DMA_ECC_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "DMA", dwmac5_dma_errors, + STAT_OFF(dma_errors), stats); +} + +int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_feat_cfg) +{ + struct stmmac_safety_feature_cfg all_safety_feats = { + .tsoee = 1, + .mrxpee = 1, + .mestee = 1, + .mrxee = 1, + .mtxee = 1, + .epsi = 1, + .edpp = 1, + .prtyen = 1, + .tmouten = 1, + }; + u32 value; + + if (!asp) + return -EINVAL; + + if (!safety_feat_cfg) + safety_feat_cfg = &all_safety_feats; + + /* 1. 
Enable Safety Features */ + value = readl(ioaddr + MTL_ECC_CONTROL); + value |= MEEAO; /* MTL ECC Error Addr Status Override */ + if (safety_feat_cfg->tsoee) + value |= TSOEE; /* TSO ECC */ + if (safety_feat_cfg->mrxpee) + value |= MRXPEE; /* MTL RX Parser ECC */ + if (safety_feat_cfg->mestee) + value |= MESTEE; /* MTL EST ECC */ + if (safety_feat_cfg->mrxee) + value |= MRXEE; /* MTL RX FIFO ECC */ + if (safety_feat_cfg->mtxee) + value |= MTXEE; /* MTL TX FIFO ECC */ + writel(value, ioaddr + MTL_ECC_CONTROL); + + /* 2. Enable MTL Safety Interrupts */ + value = readl(ioaddr + MTL_ECC_INT_ENABLE); + value |= RPCEIE; /* RX Parser Memory Correctable Error */ + value |= ECEIE; /* EST Memory Correctable Error */ + value |= RXCEIE; /* RX Memory Correctable Error */ + value |= TXCEIE; /* TX Memory Correctable Error */ + writel(value, ioaddr + MTL_ECC_INT_ENABLE); + + /* 3. Enable DMA Safety Interrupts */ + value = readl(ioaddr + DMA_ECC_INT_ENABLE); + value |= TCEIE; /* TSO Memory Correctable Error */ + writel(value, ioaddr + DMA_ECC_INT_ENABLE); + + /* Only ECC Protection for External Memory feature is selected */ + if (asp <= 0x1) + return 0; + + /* 5. Enable Parity and Timeout for FSM */ + value = readl(ioaddr + MAC_FSM_CONTROL); + if (safety_feat_cfg->prtyen) + value |= PRTYEN; /* FSM Parity Feature */ + if (safety_feat_cfg->tmouten) + value |= TMOUTEN; /* FSM Timeout Feature */ + writel(value, ioaddr + MAC_FSM_CONTROL); + + /* 4. Enable Data Parity Protection */ + value = readl(ioaddr + MTL_DPP_CONTROL); + if (safety_feat_cfg->edpp) + value |= EDPP; + writel(value, ioaddr + MTL_DPP_CONTROL); + + /* + * All the Automotive Safety features are selected without the "Parity + * Port Enable for external interface" feature. + */ + if (asp <= 0x2) + return 0; + + if (safety_feat_cfg->epsi) + value |= EPSI; + writel(value, ioaddr + MTL_DPP_CONTROL); + return 0; +} + +int dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats) +{ + bool err, corr; + u32 mtl, dma; + int ret = 0; + + if (!asp) + return -EINVAL; + + mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS); + dma = readl(ioaddr + DMA_SAFETY_INT_STATUS); + + err = (mtl & MCSIS) || (dma & MCSIS); + corr = false; + if (err) { + dwmac5_handle_mac_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = (mtl & (MEUIS | MECIS)) || (dma & (MSUIS | MSCIS)); + corr = (mtl & MECIS) || (dma & MSCIS); + if (err) { + dwmac5_handle_mtl_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = dma & (DEUIS | DECIS); + corr = dma & DECIS; + if (err) { + dwmac5_handle_dma_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + return ret; +} + +static const struct dwmac5_error { + const struct dwmac5_error_desc *desc; +} dwmac5_all_errors[] = { + { dwmac5_mac_errors }, + { dwmac5_mtl_errors }, + { dwmac5_dma_errors }, +}; + +int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc) +{ + int module = index / 32, offset = index % 32; + unsigned long *ptr = (unsigned long *)stats; + + if (module >= ARRAY_SIZE(dwmac5_all_errors)) + return -EINVAL; + if (!dwmac5_all_errors[module].desc[offset].valid) + return -EINVAL; + if (count) + *count = *(ptr + index); + if (desc) + *desc = dwmac5_all_errors[module].desc[offset].desc; + return 0; +} + +static int dwmac5_rxp_disable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val &= ~MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); + + return 
readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, + val & RXPI, 1, 10000); +} + +static void dwmac5_rxp_enable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val |= MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); +} + +static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr, + struct stmmac_tc_entry *entry, + int pos) +{ + int ret, i; + + for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) { + int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i; + u32 val; + + /* Wait for ready */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + + /* Write data */ + val = *((u32 *)&entry->val + i); + writel(val, ioaddr + MTL_RXP_IACC_DATA); + + /* Write pos */ + val = real_pos & ADDR; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Write OP */ + val |= WRRDN; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Start Write */ + val |= STARTBUSY; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Wait for done */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + } + + return 0; +} + +static struct stmmac_tc_entry * +dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count, + u32 curr_prio) +{ + struct stmmac_tc_entry *entry; + u32 min_prio = ~0x0; + int i, min_prio_idx; + bool found = false; + + for (i = count - 1; i >= 0; i--) { + entry = &entries[i]; + + /* Do not update unused entries */ + if (!entry->in_use) + continue; + /* Do not update already updated entries (i.e. fragments) */ + if (entry->in_hw) + continue; + /* Let last entry be updated last */ + if (entry->is_last) + continue; + /* Do not return fragments */ + if (entry->is_frag) + continue; + /* Check if we already checked this prio */ + if (entry->prio < curr_prio) + continue; + /* Check if this is the minimum prio */ + if (entry->prio < min_prio) { + min_prio = entry->prio; + min_prio_idx = i; + found = true; + } + } + + if (found) + return &entries[min_prio_idx]; + return NULL; +} + +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count) +{ + struct stmmac_tc_entry *entry, *frag; + int i, ret, nve = 0; + u32 curr_prio = 0; + u32 old_val, val; + + /* Force disable RX */ + old_val = readl(ioaddr + GMAC_CONFIG); + val = old_val & ~GMAC_CONFIG_RE; + writel(val, ioaddr + GMAC_CONFIG); + + /* Disable RX Parser */ + ret = dwmac5_rxp_disable(ioaddr); + if (ret) + goto re_enable; + + /* Set all entries as NOT in HW */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + entry->in_hw = false; + } + + /* Update entries by reverse order */ + while (1) { + entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio); + if (!entry) + break; + + curr_prio = entry->prio; + frag = entry->frag_ptr; + + /* Set special fragment requirements */ + if (frag) { + entry->val.af = 0; + entry->val.rf = 0; + entry->val.nc = 1; + entry->val.ok_index = nve + 2; + } + + ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + entry->in_hw = true; + + if (frag && !frag->in_hw) { + ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve); + if (ret) + goto re_enable; + frag->table_pos = nve++; + frag->in_hw = true; + } + } + + if (!nve) + goto re_enable; + + /* Update all pass entry */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + if (!entry->is_last) + continue; + + ret = 
dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + } + + /* Assume n. of parsable entries == n. of valid entries */ + val = (nve << 16) & NPE; + val |= nve & NVE; + writel(val, ioaddr + MTL_RXP_CONTROL_STATUS); + + /* Enable RX Parser */ + dwmac5_rxp_enable(ioaddr); + +re_enable: + /* Re-enable RX */ + writel(old_val, ioaddr + GMAC_CONFIG); + return ret; +} + +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags) +{ + u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + u32 val = readl(ioaddr + MAC_PPS_CONTROL); + u64 period; + + if (!cfg->available) + return -EINVAL; + if (tnsec & TRGTBUSY0) + return -EBUSY; + if (!sub_second_inc || !systime_flags) + return -EINVAL; + + val &= ~PPSx_MASK(index); + + if (!enable) { + val |= PPSCMDx(index, 0x5); + val |= PPSEN0; + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; + } + + val |= TRGTMODSELx(index, 0x2); + val |= PPSEN0; + writel(val, ioaddr + MAC_PPS_CONTROL); + + writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); + + if (!(systime_flags & PTP_TCR_TSCTRLSSR)) + cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; + writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + + period = cfg->period.tv_sec * 1000000000; + period += cfg->period.tv_nsec; + + do_div(period, sub_second_inc); + + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index)); + + period >>= 1; + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); + + /* Finally, activate it */ + val |= PPSCMDx(index, 0x2); + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; +} + +static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl) +{ + u32 ctrl; + + writel(val, ioaddr + MTL_EST_GCL_DATA); + + ctrl = (reg << ADDR_SHIFT); + ctrl |= gcl ? 
0 : GCRR; + + writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL); + + ctrl |= SRWO; + writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL); + + return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL, + ctrl, !(ctrl & SRWO), 100, 5000); +} + +int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate) +{ + int i, ret = 0x0; + u32 ctrl; + + ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false); + ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false); + ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false); + ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false); + ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false); + ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false); + if (ret) + return ret; + + for (i = 0; i < cfg->gcl_size; i++) { + ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true); + if (ret) + return ret; + } + + ctrl = readl(ioaddr + MTL_EST_CONTROL); + ctrl &= ~PTOV; + ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT; + if (cfg->enable) + ctrl |= EEST | SSWL; + else + ctrl &= ~EEST; + + writel(ctrl, ioaddr + MTL_EST_CONTROL); + + /* Configure EST interrupt */ + if (cfg->enable) + ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC); + else + ctrl = 0; + + writel(ctrl, ioaddr + MTL_EST_INT_EN); + + return 0; +} + +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt) +{ + u32 status, value, feqn, hbfq, hbfs, btrl; + u32 txqcnt_mask = (1 << txqcnt) - 1; + + status = readl(ioaddr + MTL_EST_STATUS); + + value = (CGCE | HLBS | HLBF | BTRE | SWLC); + + /* Return if there is no error */ + if (!(status & value)) + return; + + if (status & CGCE) { + /* Clear Interrupt */ + writel(CGCE, ioaddr + MTL_EST_STATUS); + + x->mtl_est_cgce++; + } + + if (status & HLBS) { + value = readl(ioaddr + MTL_EST_SCH_ERR); + value &= txqcnt_mask; + + x->mtl_est_hlbs++; + + /* Clear Interrupt */ + writel(value, ioaddr + MTL_EST_SCH_ERR); + + /* Collecting info to shows all the queues that has HLBS + * issue. 
The only way to clear this is to clear the + * statistic + */ + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value); + } + + if (status & HLBF) { + value = readl(ioaddr + MTL_EST_FRM_SZ_ERR); + feqn = value & txqcnt_mask; + + value = readl(ioaddr + MTL_EST_FRM_SZ_CAP); + hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT; + hbfs = value & SZ_CAP_HBFS_MASK; + + x->mtl_est_hlbf++; + + /* Clear Interrupt */ + writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR); + + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n", + hbfq, hbfs); + } + + if (status & BTRE) { + if ((status & BTRL) == BTRL_MAX) + x->mtl_est_btrlm++; + else + x->mtl_est_btre++; + + btrl = (status & BTRL) >> BTRL_SHIFT; + + if (net_ratelimit()) + netdev_info(dev, "EST: BTR Error Loop Count %u\n", + btrl); + + writel(BTRE, ioaddr + MTL_EST_STATUS); + } + + if (status & SWLC) { + writel(SWLC, ioaddr + MTL_EST_STATUS); + netdev_info(dev, "EST: SWOL has been switched\n"); + } +} + +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, + bool enable) +{ + u32 value; + + if (enable) { + cfg->fpe_csr = EFPE; + value = readl(ioaddr + GMAC_RXQ_CTRL1); + value &= ~GMAC_RXQCTRL_FPRQ; + value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT; + writel(value, ioaddr + GMAC_RXQ_CTRL1); + } else { + cfg->fpe_csr = 0; + } + writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS); +} + +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) +{ + u32 value; + int status; + + status = FPE_EVENT_UNKNOWN; + + /* Reads from the MAC_FPE_CTRL_STS register should only be performed + * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read" + */ + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (value & TRSP) { + status |= FPE_EVENT_TRSP; + netdev_info(dev, "FPE: Respond mPacket is transmitted\n"); + } + + if (value & TVER) { + status |= FPE_EVENT_TVER; + netdev_info(dev, "FPE: Verify mPacket is transmitted\n"); + } + + if (value & RRSP) { + status |= FPE_EVENT_RRSP; + netdev_info(dev, "FPE: Respond mPacket is received\n"); + } + + if (value & RVER) { + status |= FPE_EVENT_RVER; + netdev_info(dev, "FPE: Verify mPacket is received\n"); + } + + return status; +} + +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type) +{ + u32 value = cfg->fpe_csr; + + if (type == MPACKET_VERIFY) + value |= SVER; + else if (type == MPACKET_RESPONSE) + value |= SRSP; + + writel(value, ioaddr + MAC_FPE_CTRL_STS); +} diff --git a/devices/stmmac/dwmac5-6.1-ethercat.h b/devices/stmmac/dwmac5-6.1-ethercat.h new file mode 100644 index 00000000..34e62079 --- /dev/null +++ b/devices/stmmac/dwmac5-6.1-ethercat.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates. 
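As a side note to dwmac5_est_configure() above: the value programmed into the PTOV field of MTL_EST_CONTROL is simply six PTP clock periods expressed in nanoseconds. A minimal user-space sketch of that arithmetic, not part of the patch; the helper name est_ptov and the 250 MHz rate are illustrative assumptions only:

#include <stdio.h>

/* Reproduces the expression ((1000000000 / ptp_rate) * 6) used in
 * dwmac5_est_configure(): six PTP clock periods, in nanoseconds. */
static unsigned int est_ptov(unsigned int ptp_rate_hz)
{
        return (1000000000u / ptp_rate_hz) * 6u;
}

int main(void)
{
        /* e.g. a 250 MHz PTP clock has a 4 ns period, so PTOV = 24 */
        printf("PTOV for 250 MHz: %u\n", est_ptov(250000000u));
        return 0;
}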
+// stmmac Support for 5.xx Ethernet QoS cores + +#ifndef __DWMAC5_H__ +#define __DWMAC5_H__ + +#define MAC_DPP_FSM_INT_STATUS 0x00000140 +#define MAC_AXI_SLV_DPE_ADDR_STATUS 0x00000144 +#define MAC_FSM_CONTROL 0x00000148 +#define PRTYEN BIT(1) +#define TMOUTEN BIT(0) + +#define MAC_FPE_CTRL_STS 0x00000234 +#define TRSP BIT(19) +#define TVER BIT(18) +#define RRSP BIT(17) +#define RVER BIT(16) +#define SRSP BIT(2) +#define SVER BIT(1) +#define EFPE BIT(0) + +#define MAC_PPS_CONTROL 0x00000b70 +#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define PPS_MINIDX(x) ((x) * 8) +#define PPSx_MASK(x) GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x)) +#define MCGRENx(x) BIT(PPS_MAXIDX(x)) +#define TRGTMODSELx(x, val) \ + GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) & \ + ((val) << (PPS_MAXIDX(x) - 2)) +#define PPSCMDx(x, val) \ + GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \ + ((val) << PPS_MINIDX(x)) +#define PPSEN0 BIT(4) +#define MAC_PPSx_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10)) +#define MAC_PPSx_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10)) +#define TRGTBUSY0 BIT(31) +#define TTSL0 GENMASK(30, 0) +#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10)) +#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10)) + +#define MTL_EST_CONTROL 0x00000c50 +#define PTOV GENMASK(31, 24) +#define PTOV_SHIFT 24 +#define SSWL BIT(1) +#define EEST BIT(0) + +#define MTL_EST_STATUS 0x00000c58 +#define BTRL GENMASK(11, 8) +#define BTRL_SHIFT 8 +#define BTRL_MAX (0xF << BTRL_SHIFT) +#define SWOL BIT(7) +#define SWOL_SHIFT 7 +#define CGCE BIT(4) +#define HLBS BIT(3) +#define HLBF BIT(2) +#define BTRE BIT(1) +#define SWLC BIT(0) + +#define MTL_EST_SCH_ERR 0x00000c60 +#define MTL_EST_FRM_SZ_ERR 0x00000c64 +#define MTL_EST_FRM_SZ_CAP 0x00000c68 +#define SZ_CAP_HBFS_MASK GENMASK(14, 0) +#define SZ_CAP_HBFQ_SHIFT 16 +#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \ + ((val) > 4 ? GENMASK(18, 16) : \ + (val) > 2 ? 
GENMASK(17, 16) : \ + BIT(16)); }) + +#define MTL_EST_INT_EN 0x00000c70 +#define IECGCE CGCE +#define IEHS HLBS +#define IEHF HLBF +#define IEBE BTRE +#define IECC SWLC + +#define MTL_EST_GCL_CONTROL 0x00000c80 +#define BTR_LOW 0x0 +#define BTR_HIGH 0x1 +#define CTR_LOW 0x2 +#define CTR_HIGH 0x3 +#define TER 0x4 +#define LLR 0x5 +#define ADDR_SHIFT 8 +#define GCRR BIT(2) +#define SRWO BIT(0) +#define MTL_EST_GCL_DATA 0x00000c84 + +#define MTL_RXP_CONTROL_STATUS 0x00000ca0 +#define RXPI BIT(31) +#define NPE GENMASK(23, 16) +#define NVE GENMASK(7, 0) +#define MTL_RXP_IACC_CTRL_STATUS 0x00000cb0 +#define STARTBUSY BIT(31) +#define RXPEIEC GENMASK(22, 21) +#define RXPEIEE BIT(20) +#define WRRDN BIT(16) +#define ADDR GENMASK(15, 0) +#define MTL_RXP_IACC_DATA 0x00000cb4 +#define MTL_ECC_CONTROL 0x00000cc0 +#define MEEAO BIT(8) +#define TSOEE BIT(4) +#define MRXPEE BIT(3) +#define MESTEE BIT(2) +#define MRXEE BIT(1) +#define MTXEE BIT(0) + +#define MTL_SAFETY_INT_STATUS 0x00000cc4 +#define MCSIS BIT(31) +#define MEUIS BIT(1) +#define MECIS BIT(0) +#define MTL_ECC_INT_ENABLE 0x00000cc8 +#define RPCEIE BIT(12) +#define ECEIE BIT(8) +#define RXCEIE BIT(4) +#define TXCEIE BIT(0) +#define MTL_ECC_INT_STATUS 0x00000ccc +#define MTL_DPP_CONTROL 0x00000ce0 +#define EPSI BIT(2) +#define OPE BIT(1) +#define EDPP BIT(0) + +#define DMA_SAFETY_INT_STATUS 0x00001080 +#define MSUIS BIT(29) +#define MSCIS BIT(28) +#define DEUIS BIT(1) +#define DECIS BIT(0) +#define DMA_ECC_INT_ENABLE 0x00001084 +#define TCEIE BIT(0) +#define DMA_ECC_INT_STATUS 0x00001088 + +/* EQoS version 5.xx VLAN Tag Filter Fail Packets Queuing */ +#define GMAC_RXQ_CTRL4 0x00000094 +#define GMAC_RXQCTRL_VFFQ_MASK GENMASK(19, 17) +#define GMAC_RXQCTRL_VFFQ_SHIFT 17 +#define GMAC_RXQCTRL_VFFQE BIT(16) + +#define GMAC_INT_FPE_EN BIT(17) + +int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_cfg); +int dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); +int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc); +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); +int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate); +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, + bool enable); +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type); +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); + +#endif /* __DWMAC5_H__ */ diff --git a/devices/stmmac/dwmac5-6.1-orig.c b/devices/stmmac/dwmac5-6.1-orig.c new file mode 100644 index 00000000..8fd16750 --- /dev/null +++ b/devices/stmmac/dwmac5-6.1-orig.c @@ -0,0 +1,777 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates. 
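The PPS_MAXIDX/PPS_MINIDX/PPSCMDx macros above carve MAC_PPS_CONTROL into one 8-bit field per PPS output. A minimal user-space sketch of that packing, not part of the patch: GENMASK_U32 is open-coded here only so the example builds outside the kernel, and the command value 0x2 is the one dwmac5_flex_pps_config() writes when activating an output.

#include <stdio.h>

/* User-space stand-in for the kernel's GENMASK() */
#define GENMASK_U32(h, l)  ((0xffffffffu << (l)) & (0xffffffffu >> (31 - (h))))

#define PPS_MAXIDX(x)      ((((x) + 1) * 8) - 1)
#define PPS_MINIDX(x)      ((x) * 8)
#define PPSCMDx(x, val)    (GENMASK_U32(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \
                            ((val) << PPS_MINIDX(x)))

int main(void)
{
        /* PPS output 1 occupies bits 15..8 of MAC_PPS_CONTROL; its 4-bit
         * command field is bits 11..8, so command 0x2 lands at 0x00000200. */
        printf("PPSCMDx(1, 0x2) = 0x%08x\n", PPSCMDx(1, 0x2));
        return 0;
}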
+// stmmac Support for 5.xx Ethernet QoS cores + +#include +#include +#include "common.h" +#include "dwmac4.h" +#include "dwmac5.h" +#include "stmmac.h" +#include "stmmac_ptp.h" + +struct dwmac5_error_desc { + bool valid; + const char *desc; + const char *detailed_desc; +}; + +#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field) + +static void dwmac5_log_error(struct net_device *ndev, u32 value, bool corr, + const char *module_name, const struct dwmac5_error_desc *desc, + unsigned long field_offset, struct stmmac_safety_stats *stats) +{ + unsigned long loc, mask; + u8 *bptr = (u8 *)stats; + unsigned long *ptr; + + ptr = (unsigned long *)(bptr + field_offset); + + mask = value; + for_each_set_bit(loc, &mask, 32) { + netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ? + "correctable" : "uncorrectable", module_name, + desc[loc].desc, desc[loc].detailed_desc); + + /* Update counters */ + ptr[loc]++; + } +} + +static const struct dwmac5_error_desc dwmac5_mac_errors[32]= { + { true, "ATPES", "Application Transmit Interface Parity Check Error" }, + { true, "TPES", "TSO Data Path Parity Check Error" }, + { true, "RDPES", "Read Descriptor Parity Check Error" }, + { true, "MPES", "MTL Data Path Parity Check Error" }, + { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" }, + { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" }, + { true, "CWPES", "CSR Write Data Path Parity Check Error" }, + { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" }, + { true, "TTES", "TX FSM Timeout Error" }, + { true, "RTES", "RX FSM Timeout Error" }, + { true, "CTES", "CSR FSM Timeout Error" }, + { true, "ATES", "APP FSM Timeout Error" }, + { true, "PTES", "PTP FSM Timeout Error" }, + { true, "T125ES", "TX125 FSM Timeout Error" }, + { true, "R125ES", "RX125 FSM Timeout Error" }, + { true, "RVCTES", "REV MDC FSM Timeout Error" }, + { true, "MSTTES", "Master Read/Write Timeout Error" }, + { true, "SLVTES", "Slave Read/Write Timeout Error" }, + { true, "ATITES", "Application Timeout on ATI Interface Error" }, + { true, "ARITES", "Application Timeout on ARI Interface Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { true, "FSMPES", "FSM State Parity Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_mac_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + MAC_DPP_FSM_INT_STATUS); + writel(value, ioaddr + MAC_DPP_FSM_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "MAC", dwmac5_mac_errors, + STAT_OFF(mac_errors), stats); +} + +static const struct dwmac5_error_desc dwmac5_mtl_errors[32]= { + { true, "TXCES", "MTL TX Memory Error" }, + { true, "TXAMS", "MTL TX Memory Address Mismatch Error" }, + { true, "TXUES", "MTL TX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "RXCES", "MTL RX Memory Error" }, + { true, "RXAMS", "MTL RX Memory Address Mismatch Error" }, + { true, "RXUES", "MTL RX Memory Error" }, + { false, 
"UNKNOWN", "Unknown Error" }, /* 7 */ + { true, "ECES", "MTL EST Memory Error" }, + { true, "EAMS", "MTL EST Memory Address Mismatch Error" }, + { true, "EUES", "MTL EST Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { true, "RPCES", "MTL RX Parser Memory Error" }, + { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" }, + { true, "RPUES", "MTL RX Parser Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_mtl_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + MTL_ECC_INT_STATUS); + writel(value, ioaddr + MTL_ECC_INT_STATUS); + + dwmac5_log_error(ndev, value, correctable, "MTL", dwmac5_mtl_errors, + STAT_OFF(mtl_errors), stats); +} + +static const struct dwmac5_error_desc dwmac5_dma_errors[32]= { + { true, "TCES", "DMA TSO Memory Error" }, + { true, "TAMS", "DMA TSO Memory Address Mismatch Error" }, + { true, "TUES", "DMA TSO Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { false, "UNKNOWN", "Unknown Error" }, /* 4 */ + { false, "UNKNOWN", "Unknown Error" }, /* 5 */ + { false, "UNKNOWN", "Unknown Error" }, /* 6 */ + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { false, "UNKNOWN", "Unknown Error" }, /* 8 */ + { false, "UNKNOWN", "Unknown Error" }, /* 9 */ + { false, "UNKNOWN", "Unknown Error" }, /* 10 */ + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { false, "UNKNOWN", "Unknown Error" }, /* 12 */ + { false, "UNKNOWN", "Unknown Error" }, /* 13 */ + { false, "UNKNOWN", "Unknown Error" }, /* 14 */ + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwmac5_handle_dma_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + DMA_ECC_INT_STATUS); + writel(value, ioaddr + DMA_ECC_INT_STATUS); + + 
dwmac5_log_error(ndev, value, correctable, "DMA", dwmac5_dma_errors, + STAT_OFF(dma_errors), stats); +} + +int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_feat_cfg) +{ + struct stmmac_safety_feature_cfg all_safety_feats = { + .tsoee = 1, + .mrxpee = 1, + .mestee = 1, + .mrxee = 1, + .mtxee = 1, + .epsi = 1, + .edpp = 1, + .prtyen = 1, + .tmouten = 1, + }; + u32 value; + + if (!asp) + return -EINVAL; + + if (!safety_feat_cfg) + safety_feat_cfg = &all_safety_feats; + + /* 1. Enable Safety Features */ + value = readl(ioaddr + MTL_ECC_CONTROL); + value |= MEEAO; /* MTL ECC Error Addr Status Override */ + if (safety_feat_cfg->tsoee) + value |= TSOEE; /* TSO ECC */ + if (safety_feat_cfg->mrxpee) + value |= MRXPEE; /* MTL RX Parser ECC */ + if (safety_feat_cfg->mestee) + value |= MESTEE; /* MTL EST ECC */ + if (safety_feat_cfg->mrxee) + value |= MRXEE; /* MTL RX FIFO ECC */ + if (safety_feat_cfg->mtxee) + value |= MTXEE; /* MTL TX FIFO ECC */ + writel(value, ioaddr + MTL_ECC_CONTROL); + + /* 2. Enable MTL Safety Interrupts */ + value = readl(ioaddr + MTL_ECC_INT_ENABLE); + value |= RPCEIE; /* RX Parser Memory Correctable Error */ + value |= ECEIE; /* EST Memory Correctable Error */ + value |= RXCEIE; /* RX Memory Correctable Error */ + value |= TXCEIE; /* TX Memory Correctable Error */ + writel(value, ioaddr + MTL_ECC_INT_ENABLE); + + /* 3. Enable DMA Safety Interrupts */ + value = readl(ioaddr + DMA_ECC_INT_ENABLE); + value |= TCEIE; /* TSO Memory Correctable Error */ + writel(value, ioaddr + DMA_ECC_INT_ENABLE); + + /* Only ECC Protection for External Memory feature is selected */ + if (asp <= 0x1) + return 0; + + /* 5. Enable Parity and Timeout for FSM */ + value = readl(ioaddr + MAC_FSM_CONTROL); + if (safety_feat_cfg->prtyen) + value |= PRTYEN; /* FSM Parity Feature */ + if (safety_feat_cfg->tmouten) + value |= TMOUTEN; /* FSM Timeout Feature */ + writel(value, ioaddr + MAC_FSM_CONTROL); + + /* 4. Enable Data Parity Protection */ + value = readl(ioaddr + MTL_DPP_CONTROL); + if (safety_feat_cfg->edpp) + value |= EDPP; + writel(value, ioaddr + MTL_DPP_CONTROL); + + /* + * All the Automotive Safety features are selected without the "Parity + * Port Enable for external interface" feature. 
+ */ + if (asp <= 0x2) + return 0; + + if (safety_feat_cfg->epsi) + value |= EPSI; + writel(value, ioaddr + MTL_DPP_CONTROL); + return 0; +} + +int dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats) +{ + bool err, corr; + u32 mtl, dma; + int ret = 0; + + if (!asp) + return -EINVAL; + + mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS); + dma = readl(ioaddr + DMA_SAFETY_INT_STATUS); + + err = (mtl & MCSIS) || (dma & MCSIS); + corr = false; + if (err) { + dwmac5_handle_mac_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = (mtl & (MEUIS | MECIS)) || (dma & (MSUIS | MSCIS)); + corr = (mtl & MECIS) || (dma & MSCIS); + if (err) { + dwmac5_handle_mtl_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = dma & (DEUIS | DECIS); + corr = dma & DECIS; + if (err) { + dwmac5_handle_dma_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + return ret; +} + +static const struct dwmac5_error { + const struct dwmac5_error_desc *desc; +} dwmac5_all_errors[] = { + { dwmac5_mac_errors }, + { dwmac5_mtl_errors }, + { dwmac5_dma_errors }, +}; + +int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc) +{ + int module = index / 32, offset = index % 32; + unsigned long *ptr = (unsigned long *)stats; + + if (module >= ARRAY_SIZE(dwmac5_all_errors)) + return -EINVAL; + if (!dwmac5_all_errors[module].desc[offset].valid) + return -EINVAL; + if (count) + *count = *(ptr + index); + if (desc) + *desc = dwmac5_all_errors[module].desc[offset].desc; + return 0; +} + +static int dwmac5_rxp_disable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val &= ~MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); + + return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, + val & RXPI, 1, 10000); +} + +static void dwmac5_rxp_enable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val |= MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); +} + +static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr, + struct stmmac_tc_entry *entry, + int pos) +{ + int ret, i; + + for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) { + int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i; + u32 val; + + /* Wait for ready */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + + /* Write data */ + val = *((u32 *)&entry->val + i); + writel(val, ioaddr + MTL_RXP_IACC_DATA); + + /* Write pos */ + val = real_pos & ADDR; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Write OP */ + val |= WRRDN; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Start Write */ + val |= STARTBUSY; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Wait for done */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + } + + return 0; +} + +static struct stmmac_tc_entry * +dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count, + u32 curr_prio) +{ + struct stmmac_tc_entry *entry; + u32 min_prio = ~0x0; + int i, min_prio_idx; + bool found = false; + + for (i = count - 1; i >= 0; i--) { + entry = &entries[i]; + + /* Do not update unused entries */ + if (!entry->in_use) + continue; + /* Do not update already updated entries (i.e. 
fragments) */ + if (entry->in_hw) + continue; + /* Let last entry be updated last */ + if (entry->is_last) + continue; + /* Do not return fragments */ + if (entry->is_frag) + continue; + /* Check if we already checked this prio */ + if (entry->prio < curr_prio) + continue; + /* Check if this is the minimum prio */ + if (entry->prio < min_prio) { + min_prio = entry->prio; + min_prio_idx = i; + found = true; + } + } + + if (found) + return &entries[min_prio_idx]; + return NULL; +} + +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count) +{ + struct stmmac_tc_entry *entry, *frag; + int i, ret, nve = 0; + u32 curr_prio = 0; + u32 old_val, val; + + /* Force disable RX */ + old_val = readl(ioaddr + GMAC_CONFIG); + val = old_val & ~GMAC_CONFIG_RE; + writel(val, ioaddr + GMAC_CONFIG); + + /* Disable RX Parser */ + ret = dwmac5_rxp_disable(ioaddr); + if (ret) + goto re_enable; + + /* Set all entries as NOT in HW */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + entry->in_hw = false; + } + + /* Update entries by reverse order */ + while (1) { + entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio); + if (!entry) + break; + + curr_prio = entry->prio; + frag = entry->frag_ptr; + + /* Set special fragment requirements */ + if (frag) { + entry->val.af = 0; + entry->val.rf = 0; + entry->val.nc = 1; + entry->val.ok_index = nve + 2; + } + + ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + entry->in_hw = true; + + if (frag && !frag->in_hw) { + ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve); + if (ret) + goto re_enable; + frag->table_pos = nve++; + frag->in_hw = true; + } + } + + if (!nve) + goto re_enable; + + /* Update all pass entry */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + if (!entry->is_last) + continue; + + ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + } + + /* Assume n. of parsable entries == n. 
of valid entries */ + val = (nve << 16) & NPE; + val |= nve & NVE; + writel(val, ioaddr + MTL_RXP_CONTROL_STATUS); + + /* Enable RX Parser */ + dwmac5_rxp_enable(ioaddr); + +re_enable: + /* Re-enable RX */ + writel(old_val, ioaddr + GMAC_CONFIG); + return ret; +} + +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags) +{ + u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + u32 val = readl(ioaddr + MAC_PPS_CONTROL); + u64 period; + + if (!cfg->available) + return -EINVAL; + if (tnsec & TRGTBUSY0) + return -EBUSY; + if (!sub_second_inc || !systime_flags) + return -EINVAL; + + val &= ~PPSx_MASK(index); + + if (!enable) { + val |= PPSCMDx(index, 0x5); + val |= PPSEN0; + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; + } + + val |= TRGTMODSELx(index, 0x2); + val |= PPSEN0; + writel(val, ioaddr + MAC_PPS_CONTROL); + + writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); + + if (!(systime_flags & PTP_TCR_TSCTRLSSR)) + cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; + writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + + period = cfg->period.tv_sec * 1000000000; + period += cfg->period.tv_nsec; + + do_div(period, sub_second_inc); + + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index)); + + period >>= 1; + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); + + /* Finally, activate it */ + val |= PPSCMDx(index, 0x2); + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; +} + +static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl) +{ + u32 ctrl; + + writel(val, ioaddr + MTL_EST_GCL_DATA); + + ctrl = (reg << ADDR_SHIFT); + ctrl |= gcl ? 
0 : GCRR; + + writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL); + + ctrl |= SRWO; + writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL); + + return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL, + ctrl, !(ctrl & SRWO), 100, 5000); +} + +int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate) +{ + int i, ret = 0x0; + u32 ctrl; + + ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false); + ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false); + ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false); + ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false); + ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false); + ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false); + if (ret) + return ret; + + for (i = 0; i < cfg->gcl_size; i++) { + ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true); + if (ret) + return ret; + } + + ctrl = readl(ioaddr + MTL_EST_CONTROL); + ctrl &= ~PTOV; + ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT; + if (cfg->enable) + ctrl |= EEST | SSWL; + else + ctrl &= ~EEST; + + writel(ctrl, ioaddr + MTL_EST_CONTROL); + + /* Configure EST interrupt */ + if (cfg->enable) + ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC); + else + ctrl = 0; + + writel(ctrl, ioaddr + MTL_EST_INT_EN); + + return 0; +} + +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt) +{ + u32 status, value, feqn, hbfq, hbfs, btrl; + u32 txqcnt_mask = (1 << txqcnt) - 1; + + status = readl(ioaddr + MTL_EST_STATUS); + + value = (CGCE | HLBS | HLBF | BTRE | SWLC); + + /* Return if there is no error */ + if (!(status & value)) + return; + + if (status & CGCE) { + /* Clear Interrupt */ + writel(CGCE, ioaddr + MTL_EST_STATUS); + + x->mtl_est_cgce++; + } + + if (status & HLBS) { + value = readl(ioaddr + MTL_EST_SCH_ERR); + value &= txqcnt_mask; + + x->mtl_est_hlbs++; + + /* Clear Interrupt */ + writel(value, ioaddr + MTL_EST_SCH_ERR); + + /* Collecting info to shows all the queues that has HLBS + * issue. 
The only way to clear this is to clear the + * statistic + */ + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value); + } + + if (status & HLBF) { + value = readl(ioaddr + MTL_EST_FRM_SZ_ERR); + feqn = value & txqcnt_mask; + + value = readl(ioaddr + MTL_EST_FRM_SZ_CAP); + hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT; + hbfs = value & SZ_CAP_HBFS_MASK; + + x->mtl_est_hlbf++; + + /* Clear Interrupt */ + writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR); + + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n", + hbfq, hbfs); + } + + if (status & BTRE) { + if ((status & BTRL) == BTRL_MAX) + x->mtl_est_btrlm++; + else + x->mtl_est_btre++; + + btrl = (status & BTRL) >> BTRL_SHIFT; + + if (net_ratelimit()) + netdev_info(dev, "EST: BTR Error Loop Count %u\n", + btrl); + + writel(BTRE, ioaddr + MTL_EST_STATUS); + } + + if (status & SWLC) { + writel(SWLC, ioaddr + MTL_EST_STATUS); + netdev_info(dev, "EST: SWOL has been switched\n"); + } +} + +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, + bool enable) +{ + u32 value; + + if (enable) { + cfg->fpe_csr = EFPE; + value = readl(ioaddr + GMAC_RXQ_CTRL1); + value &= ~GMAC_RXQCTRL_FPRQ; + value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT; + writel(value, ioaddr + GMAC_RXQ_CTRL1); + } else { + cfg->fpe_csr = 0; + } + writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS); +} + +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) +{ + u32 value; + int status; + + status = FPE_EVENT_UNKNOWN; + + /* Reads from the MAC_FPE_CTRL_STS register should only be performed + * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read" + */ + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (value & TRSP) { + status |= FPE_EVENT_TRSP; + netdev_info(dev, "FPE: Respond mPacket is transmitted\n"); + } + + if (value & TVER) { + status |= FPE_EVENT_TVER; + netdev_info(dev, "FPE: Verify mPacket is transmitted\n"); + } + + if (value & RRSP) { + status |= FPE_EVENT_RRSP; + netdev_info(dev, "FPE: Respond mPacket is received\n"); + } + + if (value & RVER) { + status |= FPE_EVENT_RVER; + netdev_info(dev, "FPE: Verify mPacket is received\n"); + } + + return status; +} + +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type) +{ + u32 value = cfg->fpe_csr; + + if (type == MPACKET_VERIFY) + value |= SVER; + else if (type == MPACKET_RESPONSE) + value |= SRSP; + + writel(value, ioaddr + MAC_FPE_CTRL_STS); +} diff --git a/devices/stmmac/dwmac5-6.1-orig.h b/devices/stmmac/dwmac5-6.1-orig.h new file mode 100644 index 00000000..34e62079 --- /dev/null +++ b/devices/stmmac/dwmac5-6.1-orig.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates. 
+// stmmac Support for 5.xx Ethernet QoS cores + +#ifndef __DWMAC5_H__ +#define __DWMAC5_H__ + +#define MAC_DPP_FSM_INT_STATUS 0x00000140 +#define MAC_AXI_SLV_DPE_ADDR_STATUS 0x00000144 +#define MAC_FSM_CONTROL 0x00000148 +#define PRTYEN BIT(1) +#define TMOUTEN BIT(0) + +#define MAC_FPE_CTRL_STS 0x00000234 +#define TRSP BIT(19) +#define TVER BIT(18) +#define RRSP BIT(17) +#define RVER BIT(16) +#define SRSP BIT(2) +#define SVER BIT(1) +#define EFPE BIT(0) + +#define MAC_PPS_CONTROL 0x00000b70 +#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define PPS_MINIDX(x) ((x) * 8) +#define PPSx_MASK(x) GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x)) +#define MCGRENx(x) BIT(PPS_MAXIDX(x)) +#define TRGTMODSELx(x, val) \ + GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) & \ + ((val) << (PPS_MAXIDX(x) - 2)) +#define PPSCMDx(x, val) \ + GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \ + ((val) << PPS_MINIDX(x)) +#define PPSEN0 BIT(4) +#define MAC_PPSx_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10)) +#define MAC_PPSx_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10)) +#define TRGTBUSY0 BIT(31) +#define TTSL0 GENMASK(30, 0) +#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10)) +#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10)) + +#define MTL_EST_CONTROL 0x00000c50 +#define PTOV GENMASK(31, 24) +#define PTOV_SHIFT 24 +#define SSWL BIT(1) +#define EEST BIT(0) + +#define MTL_EST_STATUS 0x00000c58 +#define BTRL GENMASK(11, 8) +#define BTRL_SHIFT 8 +#define BTRL_MAX (0xF << BTRL_SHIFT) +#define SWOL BIT(7) +#define SWOL_SHIFT 7 +#define CGCE BIT(4) +#define HLBS BIT(3) +#define HLBF BIT(2) +#define BTRE BIT(1) +#define SWLC BIT(0) + +#define MTL_EST_SCH_ERR 0x00000c60 +#define MTL_EST_FRM_SZ_ERR 0x00000c64 +#define MTL_EST_FRM_SZ_CAP 0x00000c68 +#define SZ_CAP_HBFS_MASK GENMASK(14, 0) +#define SZ_CAP_HBFQ_SHIFT 16 +#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \ + ((val) > 4 ? GENMASK(18, 16) : \ + (val) > 2 ? 
GENMASK(17, 16) : \ + BIT(16)); }) + +#define MTL_EST_INT_EN 0x00000c70 +#define IECGCE CGCE +#define IEHS HLBS +#define IEHF HLBF +#define IEBE BTRE +#define IECC SWLC + +#define MTL_EST_GCL_CONTROL 0x00000c80 +#define BTR_LOW 0x0 +#define BTR_HIGH 0x1 +#define CTR_LOW 0x2 +#define CTR_HIGH 0x3 +#define TER 0x4 +#define LLR 0x5 +#define ADDR_SHIFT 8 +#define GCRR BIT(2) +#define SRWO BIT(0) +#define MTL_EST_GCL_DATA 0x00000c84 + +#define MTL_RXP_CONTROL_STATUS 0x00000ca0 +#define RXPI BIT(31) +#define NPE GENMASK(23, 16) +#define NVE GENMASK(7, 0) +#define MTL_RXP_IACC_CTRL_STATUS 0x00000cb0 +#define STARTBUSY BIT(31) +#define RXPEIEC GENMASK(22, 21) +#define RXPEIEE BIT(20) +#define WRRDN BIT(16) +#define ADDR GENMASK(15, 0) +#define MTL_RXP_IACC_DATA 0x00000cb4 +#define MTL_ECC_CONTROL 0x00000cc0 +#define MEEAO BIT(8) +#define TSOEE BIT(4) +#define MRXPEE BIT(3) +#define MESTEE BIT(2) +#define MRXEE BIT(1) +#define MTXEE BIT(0) + +#define MTL_SAFETY_INT_STATUS 0x00000cc4 +#define MCSIS BIT(31) +#define MEUIS BIT(1) +#define MECIS BIT(0) +#define MTL_ECC_INT_ENABLE 0x00000cc8 +#define RPCEIE BIT(12) +#define ECEIE BIT(8) +#define RXCEIE BIT(4) +#define TXCEIE BIT(0) +#define MTL_ECC_INT_STATUS 0x00000ccc +#define MTL_DPP_CONTROL 0x00000ce0 +#define EPSI BIT(2) +#define OPE BIT(1) +#define EDPP BIT(0) + +#define DMA_SAFETY_INT_STATUS 0x00001080 +#define MSUIS BIT(29) +#define MSCIS BIT(28) +#define DEUIS BIT(1) +#define DECIS BIT(0) +#define DMA_ECC_INT_ENABLE 0x00001084 +#define TCEIE BIT(0) +#define DMA_ECC_INT_STATUS 0x00001088 + +/* EQoS version 5.xx VLAN Tag Filter Fail Packets Queuing */ +#define GMAC_RXQ_CTRL4 0x00000094 +#define GMAC_RXQCTRL_VFFQ_MASK GENMASK(19, 17) +#define GMAC_RXQCTRL_VFFQ_SHIFT 17 +#define GMAC_RXQCTRL_VFFQE BIT(16) + +#define GMAC_INT_FPE_EN BIT(17) + +int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_cfg); +int dwmac5_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); +int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc); +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); +int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate); +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, + bool enable); +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type); +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); + +#endif /* __DWMAC5_H__ */ diff --git a/devices/stmmac/dwmac_dma-6.1-ethercat.h b/devices/stmmac/dwmac_dma-6.1-ethercat.h new file mode 100644 index 00000000..acd70b9a --- /dev/null +++ b/devices/stmmac/dwmac_dma-6.1-ethercat.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + DWMAC DMA Header file. 
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DWMAC_DMA_H__ +#define __DWMAC_DMA_H__ + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ +#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ +#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */ +#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */ +#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */ +#define DMA_STATUS 0x00001014 /* Status Register */ +#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ +#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ +#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ + +/* SW Reset */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ + +/* Rx watchdog register */ +#define DMA_RX_WATCHDOG 0x00001024 + +/* AXI Master Bus Mode */ +#define DMA_AXI_BUS_MODE 0x00001028 + +#define DMA_AXI_EN_LPI BIT(31) +#define DMA_AXI_LPI_XIT_FRM BIT(30) +#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20) +#define DMA_AXI_WR_OSR_LMT_SHIFT 20 +#define DMA_AXI_WR_OSR_LMT_MASK 0xf +#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16) +#define DMA_AXI_RD_OSR_LMT_SHIFT 16 +#define DMA_AXI_RD_OSR_LMT_MASK 0xf + +#define DMA_AXI_OSR_MAX 0xf +#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ + (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) +#define DMA_AXI_1KBBE BIT(13) +#define DMA_AXI_AAL BIT(12) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ + DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ + DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ + DMA_AXI_BLEN4) + +#define DMA_AXI_UNDEF BIT(0) + +#define DMA_AXI_BURST_LEN_MASK 0x000000FE + +#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ +#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ +#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ + +/* DMA Control register defines */ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ + +/* DMA Normal interrupt */ +#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ +#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ +#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ +#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ + +#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ + DMA_INTR_ENA_TIE) + +/* DMA Abnormal interrupt */ +#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ +#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ +#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ +#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ +#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ +#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ +#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ +#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ +#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ + +#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ + DMA_INTR_ENA_UNE) + +/* DMA default 
interrupt mask */ +#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) +#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE) +#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE) + +/* DMA Status register defines */ +#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */ +#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ +#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + +#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \ + DMA_STATUS_AIS | \ + DMA_STATUS_FBI) + +#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \ + DMA_STATUS_RWT | \ + DMA_STATUS_RPS | \ + DMA_STATUS_RU | \ + DMA_STATUS_RI | \ + DMA_STATUS_OVF | \ + DMA_STATUS_MSK_COMMON) + +#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \ + DMA_STATUS_UNF | \ + DMA_STATUS_TJT | \ + DMA_STATUS_TU | \ + DMA_STATUS_TPS | \ + DMA_STATUS_TI | \ + DMA_STATUS_MSK_COMMON) + +#define NUM_DWMAC100_DMA_REGS 9 +#define NUM_DWMAC1000_DMA_REGS 23 +#define NUM_DWMAC4_DMA_REGS 27 + +void dwmac_enable_dma_transmission(void __iomem *ioaddr); +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 chan, u32 dir); +int dwmac_dma_reset(void __iomem *ioaddr); + +#endif /* __DWMAC_DMA_H__ */ diff --git a/devices/stmmac/dwmac_dma-6.1-orig.h b/devices/stmmac/dwmac_dma-6.1-orig.h new file mode 100644 index 00000000..acd70b9a --- /dev/null +++ b/devices/stmmac/dwmac_dma-6.1-orig.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + DWMAC DMA Header file. 
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DWMAC_DMA_H__ +#define __DWMAC_DMA_H__ + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ +#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ +#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */ +#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */ +#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */ +#define DMA_STATUS 0x00001014 /* Status Register */ +#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ +#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ +#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ + +/* SW Reset */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ + +/* Rx watchdog register */ +#define DMA_RX_WATCHDOG 0x00001024 + +/* AXI Master Bus Mode */ +#define DMA_AXI_BUS_MODE 0x00001028 + +#define DMA_AXI_EN_LPI BIT(31) +#define DMA_AXI_LPI_XIT_FRM BIT(30) +#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20) +#define DMA_AXI_WR_OSR_LMT_SHIFT 20 +#define DMA_AXI_WR_OSR_LMT_MASK 0xf +#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16) +#define DMA_AXI_RD_OSR_LMT_SHIFT 16 +#define DMA_AXI_RD_OSR_LMT_MASK 0xf + +#define DMA_AXI_OSR_MAX 0xf +#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ + (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) +#define DMA_AXI_1KBBE BIT(13) +#define DMA_AXI_AAL BIT(12) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ + DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ + DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ + DMA_AXI_BLEN4) + +#define DMA_AXI_UNDEF BIT(0) + +#define DMA_AXI_BURST_LEN_MASK 0x000000FE + +#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ +#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ +#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ + +/* DMA Control register defines */ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ + +/* DMA Normal interrupt */ +#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ +#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ +#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ +#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ + +#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ + DMA_INTR_ENA_TIE) + +/* DMA Abnormal interrupt */ +#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ +#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ +#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ +#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ +#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ +#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ +#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ +#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ +#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ + +#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ + DMA_INTR_ENA_UNE) + +/* DMA default 
interrupt mask */ +#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) +#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE) +#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE) + +/* DMA Status register defines */ +#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */ +#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ +#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + +#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \ + DMA_STATUS_AIS | \ + DMA_STATUS_FBI) + +#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \ + DMA_STATUS_RWT | \ + DMA_STATUS_RPS | \ + DMA_STATUS_RU | \ + DMA_STATUS_RI | \ + DMA_STATUS_OVF | \ + DMA_STATUS_MSK_COMMON) + +#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \ + DMA_STATUS_UNF | \ + DMA_STATUS_TJT | \ + DMA_STATUS_TU | \ + DMA_STATUS_TPS | \ + DMA_STATUS_TI | \ + DMA_STATUS_MSK_COMMON) + +#define NUM_DWMAC100_DMA_REGS 9 +#define NUM_DWMAC1000_DMA_REGS 23 +#define NUM_DWMAC4_DMA_REGS 27 + +void dwmac_enable_dma_transmission(void __iomem *ioaddr); +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 chan, u32 dir); +int dwmac_dma_reset(void __iomem *ioaddr); + +#endif /* __DWMAC_DMA_H__ */ diff --git a/devices/stmmac/dwmac_lib-6.1-ethercat.c b/devices/stmmac/dwmac_lib-6.1-ethercat.c new file mode 100644 index 00000000..b93ad883 --- /dev/null +++ b/devices/stmmac/dwmac_lib-6.1-ethercat.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro 
+*******************************************************************************/ + +#include <linux/io.h> +#include <linux/iopoll.h> +#include "common-6.1-ethercat.h" +#include "dwmac_dma-6.1-ethercat.h" + +#define GMAC_HI_REG_AE 0x80000000 + +int dwmac_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 10000, 200000); +} + +/* CSR1 enables the transmit DMA to check for new descriptor */ +void dwmac_enable_dma_transmission(void __iomem *ioaddr) +{ + writel(1, ioaddr + DMA_XMT_POLL_DEMAND); +} + +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_INTR_ENA); + + if (rx) + value |= DMA_INTR_DEFAULT_RX; + if (tx) + value |= DMA_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_INTR_ENA); +} + +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_INTR_ENA); + + if (rx) + value &= ~DMA_INTR_DEFAULT_RX; + if (tx) + value &= ~DMA_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_INTR_ENA); +} + +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); +} + +#ifdef DWMAC_DMA_DEBUG +static void show_tx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT; + + switch (state) { + case 0: + pr_debug("- TX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_debug("- TX (Running): Fetching the Tx desc\n"); + break; + case 2: + pr_debug("- TX (Running): Waiting for end of tx\n"); + break; + case 3: + pr_debug("- TX (Running): Reading the data " + "and queuing the data into the Tx buf\n"); + break; + case 6: + pr_debug("- TX (Suspended): Tx Buff Underflow " + "or an unavailable Transmit descriptor\n"); + break; + case 7: + pr_debug("- TX (Running): Closing Tx descriptor\n"); + break; + default: + break; + } +} + +static void show_rx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT; + + switch (state) { + case 0: + pr_debug("- RX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_debug("- RX (Running): Fetching the Rx desc\n"); + break; + case 2: + pr_debug("- RX (Running): Checking for end of pkt\n"); + break; + case 3: + pr_debug("- RX (Running): Waiting for Rx pkt\n"); + break; + case 4: + pr_debug("- RX (Suspended): Unavailable Rx buf\n"); + break; + case 5: + pr_debug("- RX (Running): Closing Rx descriptor\n"); + break; + case 6: + pr_debug("- RX(Running): Flushing the current frame" + " from the Rx buf\n"); + break; + case 7: + pr_debug("- RX (Running): Queuing the Rx frame" + " from the Rx buf into memory\n"); + break; + default: + break; + } +} +#endif + +int dwmac_dma_interrupt(void __iomem *ioaddr, + struct
stmmac_extra_stats *x, u32 chan, u32 dir) +{ + int ret = 0; + /* read the status register (CSR5) */ + u32 intr_status = readl(ioaddr + DMA_STATUS); + +#ifdef DWMAC_DMA_DEBUG + /* Enable it to monitor DMA rx/tx status in case of critical problems */ + pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status); + show_tx_process_state(intr_status); + show_rx_process_state(intr_status); +#endif + + if (dir == DMA_DIR_RX) + intr_status &= DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_STATUS_MSK_TX; + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + if (unlikely(intr_status & DMA_STATUS_UNF)) { + ret = tx_hard_error_bump_tc; + x->tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) + x->tx_jabber_irq++; + + if (unlikely(intr_status & DMA_STATUS_OVF)) + x->rx_overflow_irq++; + + if (unlikely(intr_status & DMA_STATUS_RU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & DMA_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(intr_status & DMA_STATUS_FBI)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & DMA_STATUS_NIS)) { + x->normal_irq_n++; + if (likely(intr_status & DMA_STATUS_RI)) { + u32 value = readl(ioaddr + DMA_INTR_ENA); + /* to schedule NAPI on real RIE event. */ + if (likely(value & DMA_INTR_ENA_RIE)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + } + if (likely(intr_status & DMA_STATUS_TI)) { + x->tx_normal_irq_n++; + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_STATUS_ERI)) + x->rx_early_irq++; + } + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ + writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); + + return ret; +} + +void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); + + do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); +} + +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + /* For MAC Addr registers we have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. 
+ */ + writel(data | GMAC_HI_REG_AE, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); +} + +/* Enable disable MAC RX/TX */ +void stmmac_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 old_val, value; + + old_val = readl(ioaddr + MAC_CTRL_REG); + value = old_val; + + if (enable) + value |= MAC_ENABLE_RX | MAC_ENABLE_TX; + else + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); + + if (value != old_val) + writel(value, ioaddr + MAC_CTRL_REG); +} + +void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} diff --git a/devices/stmmac/dwmac_lib-6.1-orig.c b/devices/stmmac/dwmac_lib-6.1-orig.c new file mode 100644 index 00000000..9b6138b1 --- /dev/null +++ b/devices/stmmac/dwmac_lib-6.1-orig.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "common.h" +#include "dwmac_dma.h" + +#define GMAC_HI_REG_AE 0x80000000 + +int dwmac_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 10000, 200000); +} + +/* CSR1 enables the transmit DMA to check for new descriptor */ +void dwmac_enable_dma_transmission(void __iomem *ioaddr) +{ + writel(1, ioaddr + DMA_XMT_POLL_DEMAND); +} + +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_INTR_ENA); + + if (rx) + value |= DMA_INTR_DEFAULT_RX; + if (tx) + value |= DMA_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_INTR_ENA); +} + +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx) +{ + u32 value = readl(ioaddr + DMA_INTR_ENA); + + if (rx) + value &= ~DMA_INTR_DEFAULT_RX; + if (tx) + value &= ~DMA_INTR_DEFAULT_TX; + + writel(value, ioaddr + DMA_INTR_ENA); +} + +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); +} + +#ifdef DWMAC_DMA_DEBUG +static void show_tx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT; + + switch (state) { + case 0: + pr_debug("- TX 
(Stopped): Reset or Stop command\n"); + break; + case 1: + pr_debug("- TX (Running): Fetching the Tx desc\n"); + break; + case 2: + pr_debug("- TX (Running): Waiting for end of tx\n"); + break; + case 3: + pr_debug("- TX (Running): Reading the data " + "and queuing the data into the Tx buf\n"); + break; + case 6: + pr_debug("- TX (Suspended): Tx Buff Underflow " + "or an unavailable Transmit descriptor\n"); + break; + case 7: + pr_debug("- TX (Running): Closing Tx descriptor\n"); + break; + default: + break; + } +} + +static void show_rx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT; + + switch (state) { + case 0: + pr_debug("- RX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_debug("- RX (Running): Fetching the Rx desc\n"); + break; + case 2: + pr_debug("- RX (Running): Checking for end of pkt\n"); + break; + case 3: + pr_debug("- RX (Running): Waiting for Rx pkt\n"); + break; + case 4: + pr_debug("- RX (Suspended): Unavailable Rx buf\n"); + break; + case 5: + pr_debug("- RX (Running): Closing Rx descriptor\n"); + break; + case 6: + pr_debug("- RX(Running): Flushing the current frame" + " from the Rx buf\n"); + break; + case 7: + pr_debug("- RX (Running): Queuing the Rx frame" + " from the Rx buf into memory\n"); + break; + default: + break; + } +} +#endif + +int dwmac_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, u32 dir) +{ + int ret = 0; + /* read the status register (CSR5) */ + u32 intr_status = readl(ioaddr + DMA_STATUS); + +#ifdef DWMAC_DMA_DEBUG + /* Enable it to monitor DMA rx/tx status in case of critical problems */ + pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status); + show_tx_process_state(intr_status); + show_rx_process_state(intr_status); +#endif + + if (dir == DMA_DIR_RX) + intr_status &= DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_STATUS_MSK_TX; + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + if (unlikely(intr_status & DMA_STATUS_UNF)) { + ret = tx_hard_error_bump_tc; + x->tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) + x->tx_jabber_irq++; + + if (unlikely(intr_status & DMA_STATUS_OVF)) + x->rx_overflow_irq++; + + if (unlikely(intr_status & DMA_STATUS_RU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & DMA_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(intr_status & DMA_STATUS_FBI)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & DMA_STATUS_NIS)) { + x->normal_irq_n++; + if (likely(intr_status & DMA_STATUS_RI)) { + u32 value = readl(ioaddr + DMA_INTR_ENA); + /* to schedule NAPI on real RIE event. 
*/ + if (likely(value & DMA_INTR_ENA_RIE)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + } + if (likely(intr_status & DMA_STATUS_TI)) { + x->tx_normal_irq_n++; + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_STATUS_ERI)) + x->rx_early_irq++; + } + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ + writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); + + return ret; +} + +void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); + + do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); +} + +void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + /* For MAC Addr registers we have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. + */ + writel(data | GMAC_HI_REG_AE, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); +} +EXPORT_SYMBOL_GPL(stmmac_set_mac_addr); + +/* Enable disable MAC RX/TX */ +void stmmac_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 old_val, value; + + old_val = readl(ioaddr + MAC_CTRL_REG); + value = old_val; + + if (enable) + value |= MAC_ENABLE_RX | MAC_ENABLE_TX; + else + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); + + if (value != old_val) + writel(value, ioaddr + MAC_CTRL_REG); +} + +void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} +EXPORT_SYMBOL_GPL(stmmac_get_mac_addr); diff --git a/devices/stmmac/dwxgmac2-6.1-ethercat.h b/devices/stmmac/dwxgmac2-6.1-ethercat.h new file mode 100644 index 00000000..b246b78c --- /dev/null +++ b/devices/stmmac/dwxgmac2-6.1-ethercat.h @@ -0,0 +1,477 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC definitions. 
+ */ + +#ifndef __STMMAC_DWXGMAC2_H__ +#define __STMMAC_DWXGMAC2_H__ + +#include "common-6.1-ethercat.h" + +/* Misc */ +#define XGMAC_JUMBO_LEN 16368 + +/* MAC Registers */ +#define XGMAC_TX_CONFIG 0x00000000 +#define XGMAC_CONFIG_SS_OFF 29 +#define XGMAC_CONFIG_SS_MASK GENMASK(31, 29) +#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_2500_GMII (0x2 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_1000_GMII (0x3 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_100_MII (0x4 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_5000 (0x5 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_2500 (0x6 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_10_MII (0x7 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SARC GENMASK(22, 20) +#define XGMAC_CONFIG_SARC_SHIFT 20 +#define XGMAC_CONFIG_JD BIT(16) +#define XGMAC_CONFIG_TE BIT(0) +#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD) +#define XGMAC_RX_CONFIG 0x00000004 +#define XGMAC_CONFIG_ARPEN BIT(31) +#define XGMAC_CONFIG_GPSL GENMASK(29, 16) +#define XGMAC_CONFIG_GPSL_SHIFT 16 +#define XGMAC_CONFIG_HDSMS GENMASK(14, 12) +#define XGMAC_CONFIG_HDSMS_SHIFT 12 +#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT) +#define XGMAC_CONFIG_S2KP BIT(11) +#define XGMAC_CONFIG_LM BIT(10) +#define XGMAC_CONFIG_IPC BIT(9) +#define XGMAC_CONFIG_JE BIT(8) +#define XGMAC_CONFIG_WD BIT(7) +#define XGMAC_CONFIG_GPSLCE BIT(6) +#define XGMAC_CONFIG_CST BIT(2) +#define XGMAC_CONFIG_ACS BIT(1) +#define XGMAC_CONFIG_RE BIT(0) +#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \ + (XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT)) +#define XGMAC_PACKET_FILTER 0x00000008 +#define XGMAC_FILTER_RA BIT(31) +#define XGMAC_FILTER_IPFE BIT(20) +#define XGMAC_FILTER_VTFE BIT(16) +#define XGMAC_FILTER_HPF BIT(10) +#define XGMAC_FILTER_PCF BIT(7) +#define XGMAC_FILTER_PM BIT(4) +#define XGMAC_FILTER_HMC BIT(2) +#define XGMAC_FILTER_PR BIT(0) +#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) +#define XGMAC_MAX_HASH_TABLE 8 +#define XGMAC_VLAN_TAG 0x00000050 +#define XGMAC_VLAN_EDVLP BIT(26) +#define XGMAC_VLAN_VTHM BIT(25) +#define XGMAC_VLAN_DOVLTC BIT(20) +#define XGMAC_VLAN_ESVL BIT(18) +#define XGMAC_VLAN_ETV BIT(16) +#define XGMAC_VLAN_VID GENMASK(15, 0) +#define XGMAC_VLAN_HASH_TABLE 0x00000058 +#define XGMAC_VLAN_INCL 0x00000060 +#define XGMAC_VLAN_VLTI BIT(20) +#define XGMAC_VLAN_CSVL BIT(19) +#define XGMAC_VLAN_VLC GENMASK(17, 16) +#define XGMAC_VLAN_VLC_SHIFT 16 +#define XGMAC_RXQ_CTRL0 0x000000a0 +#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) +#define XGMAC_RXQEN_SHIFT(x) ((x) * 2) +#define XGMAC_RXQ_CTRL1 0x000000a4 +#define XGMAC_RQ GENMASK(7, 4) +#define XGMAC_RQ_SHIFT 4 +#define XGMAC_RXQ_CTRL2 0x000000a8 +#define XGMAC_RXQ_CTRL3 0x000000ac +#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_PSRQ_SHIFT(x) ((x) * 8) +#define XGMAC_INT_STATUS 0x000000b0 +#define XGMAC_LPIIS BIT(5) +#define XGMAC_PMTIS BIT(4) +#define XGMAC_INT_EN 0x000000b4 +#define XGMAC_TSIE BIT(12) +#define XGMAC_LPIIE BIT(5) +#define XGMAC_PMTIE BIT(4) +#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE) +#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4) +#define XGMAC_PT GENMASK(31, 16) +#define XGMAC_PT_SHIFT 16 +#define XGMAC_TFE BIT(1) +#define XGMAC_RX_FLOW_CTRL 0x00000090 +#define XGMAC_RFE BIT(0) +#define XGMAC_PMT 0x000000c0 +#define XGMAC_GLBLUCAST BIT(9) +#define XGMAC_RWKPKTEN BIT(2) +#define XGMAC_MGKPKTEN BIT(1) +#define XGMAC_PWRDWN BIT(0) +#define XGMAC_LPI_CTRL 0x000000d0 +#define XGMAC_TXCGE BIT(21) 
+#define XGMAC_LPITXA BIT(19) +#define XGMAC_PLS BIT(17) +#define XGMAC_LPITXEN BIT(16) +#define XGMAC_RLPIEX BIT(3) +#define XGMAC_RLPIEN BIT(2) +#define XGMAC_TLPIEX BIT(1) +#define XGMAC_TLPIEN BIT(0) +#define XGMAC_LPI_TIMER_CTRL 0x000000d4 +#define XGMAC_HW_FEATURE0 0x0000011c +#define XGMAC_HWFEAT_SAVLANINS BIT(27) +#define XGMAC_HWFEAT_RXCOESEL BIT(16) +#define XGMAC_HWFEAT_TXCOESEL BIT(14) +#define XGMAC_HWFEAT_EEESEL BIT(13) +#define XGMAC_HWFEAT_TSSEL BIT(12) +#define XGMAC_HWFEAT_AVSEL BIT(11) +#define XGMAC_HWFEAT_RAVSEL BIT(10) +#define XGMAC_HWFEAT_ARPOFFSEL BIT(9) +#define XGMAC_HWFEAT_MMCSEL BIT(8) +#define XGMAC_HWFEAT_MGKSEL BIT(7) +#define XGMAC_HWFEAT_RWKSEL BIT(6) +#define XGMAC_HWFEAT_VLHASH BIT(4) +#define XGMAC_HWFEAT_GMIISEL BIT(1) +#define XGMAC_HW_FEATURE1 0x00000120 +#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27) +#define XGMAC_HWFEAT_HASHTBLSZ GENMASK(25, 24) +#define XGMAC_HWFEAT_RSSEN BIT(20) +#define XGMAC_HWFEAT_TSOEN BIT(18) +#define XGMAC_HWFEAT_SPHEN BIT(17) +#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14) +#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6) +#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0) +#define XGMAC_HW_FEATURE2 0x00000124 +#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24) +#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18) +#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12) +#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6) +#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0) +#define XGMAC_HW_FEATURE3 0x00000128 +#define XGMAC_HWFEAT_TBSSEL BIT(27) +#define XGMAC_HWFEAT_FPESEL BIT(26) +#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23) +#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20) +#define XGMAC_HWFEAT_ESTSEL BIT(19) +#define XGMAC_HWFEAT_ASP GENMASK(15, 14) +#define XGMAC_HWFEAT_DVLAN BIT(13) +#define XGMAC_HWFEAT_FRPES GENMASK(12, 11) +#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9) +#define XGMAC_HWFEAT_FRPSEL BIT(3) +#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150 +#define XGMAC_MAC_FSM_CONTROL 0x00000158 +#define XGMAC_PRTYEN BIT(1) +#define XGMAC_TMOUTEN BIT(0) +#define XGMAC_MDIO_ADDR 0x00000200 +#define XGMAC_MDIO_DATA 0x00000204 +#define XGMAC_MDIO_C22P 0x00000220 +#define XGMAC_FPE_CTRL_STS 0x00000280 +#define XGMAC_EFPE BIT(0) +#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8) +#define XGMAC_ADDR_MAX 32 +#define XGMAC_AE BIT(31) +#define XGMAC_DCS GENMASK(19, 16) +#define XGMAC_DCS_SHIFT 16 +#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8) +#define XGMAC_L3L4_ADDR_CTRL 0x00000c00 +#define XGMAC_IDDR GENMASK(15, 8) +#define XGMAC_IDDR_SHIFT 8 +#define XGMAC_IDDR_FNUM 4 +#define XGMAC_TT BIT(1) +#define XGMAC_XB BIT(0) +#define XGMAC_L3L4_DATA 0x00000c04 +#define XGMAC_L3L4_CTRL 0x0 +#define XGMAC_L4DPIM0 BIT(21) +#define XGMAC_L4DPM0 BIT(20) +#define XGMAC_L4SPIM0 BIT(19) +#define XGMAC_L4SPM0 BIT(18) +#define XGMAC_L4PEN0 BIT(16) +#define XGMAC_L3HDBM0 GENMASK(15, 11) +#define XGMAC_L3HSBM0 GENMASK(10, 6) +#define XGMAC_L3DAIM0 BIT(5) +#define XGMAC_L3DAM0 BIT(4) +#define XGMAC_L3SAIM0 BIT(3) +#define XGMAC_L3SAM0 BIT(2) +#define XGMAC_L3PEN0 BIT(0) +#define XGMAC_L4_ADDR 0x1 +#define XGMAC_L4DP0 GENMASK(31, 16) +#define XGMAC_L4DP0_SHIFT 16 +#define XGMAC_L4SP0 GENMASK(15, 0) +#define XGMAC_L3_ADDR0 0x4 +#define XGMAC_L3_ADDR1 0x5 +#define XGMAC_L3_ADDR2 0x6 +#define XMGAC_L3_ADDR3 0x7 +#define XGMAC_ARP_ADDR 0x00000c10 +#define XGMAC_RSS_CTRL 0x00000c80 +#define XGMAC_UDP4TE BIT(3) +#define XGMAC_TCP4TE BIT(2) +#define XGMAC_IP2TE BIT(1) +#define XGMAC_RSSE BIT(0) +#define XGMAC_RSS_ADDR 0x00000c88 +#define XGMAC_RSSIA_SHIFT 8 +#define XGMAC_ADDRT BIT(2) +#define 
XGMAC_CT BIT(1) +#define XGMAC_OB BIT(0) +#define XGMAC_RSS_DATA 0x00000c8c +#define XGMAC_TIMESTAMP_STATUS 0x00000d20 +#define XGMAC_TXTSC BIT(15) +#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30 +#define XGMAC_TXTSSTSLO GENMASK(30, 0) +#define XGMAC_TXTIMESTAMP_SEC 0x00000d34 +#define XGMAC_PPS_CONTROL 0x00000d70 +#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define XGMAC_PPS_MINIDX(x) ((x) * 8) +#define XGMAC_PPSx_MASK(x) \ + GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x)) +#define XGMAC_TRGTMODSELx(x, val) \ + GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \ + ((val) << (XGMAC_PPS_MAXIDX(x) - 2)) +#define XGMAC_PPSCMDx(x, val) \ + GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \ + ((val) << XGMAC_PPS_MINIDX(x)) +#define XGMAC_PPSCMD_START 0x2 +#define XGMAC_PPSCMD_STOP 0x5 +#define XGMAC_PPSENx(x) BIT(4 + (x) * 8) +#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10) +#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10) +#define XGMAC_TRGTBUSY0 BIT(31) +#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10) +#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10) + +/* MTL Registers */ +#define XGMAC_MTL_OPMODE 0x00001000 +#define XGMAC_FRPE BIT(15) +#define XGMAC_ETSALG GENMASK(6, 5) +#define XGMAC_WRR (0x0 << 5) +#define XGMAC_WFQ (0x1 << 5) +#define XGMAC_DWRR (0x2 << 5) +#define XGMAC_RAA BIT(2) +#define XGMAC_MTL_INT_STATUS 0x00001020 +#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030 +#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034 +#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8) +#define XGMAC_QDDMACH BIT(7) +#define XGMAC_TC_PRTY_MAP0 0x00001040 +#define XGMAC_TC_PRTY_MAP1 0x00001044 +#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_PSTC_SHIFT(x) ((x) * 8) +#define XGMAC_MTL_EST_CONTROL 0x00001050 +#define XGMAC_PTOV GENMASK(31, 23) +#define XGMAC_PTOV_SHIFT 23 +#define XGMAC_SSWL BIT(1) +#define XGMAC_EEST BIT(0) +#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080 +#define XGMAC_BTR_LOW 0x0 +#define XGMAC_BTR_HIGH 0x1 +#define XGMAC_CTR_LOW 0x2 +#define XGMAC_CTR_HIGH 0x3 +#define XGMAC_TER 0x4 +#define XGMAC_LLR 0x5 +#define XGMAC_ADDR_SHIFT 8 +#define XGMAC_GCRR BIT(2) +#define XGMAC_SRWO BIT(0) +#define XGMAC_MTL_EST_GCL_DATA 0x00001084 +#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0 +#define XGMAC_RXPI BIT(31) +#define XGMAC_NPE GENMASK(23, 16) +#define XGMAC_NVE GENMASK(7, 0) +#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0 +#define XGMAC_STARTBUSY BIT(31) +#define XGMAC_WRRDN BIT(16) +#define XGMAC_ADDR GENMASK(9, 0) +#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4 +#define XGMAC_MTL_ECC_CONTROL 0x000010c0 +#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4 +#define XGMAC_MEUIS BIT(1) +#define XGMAC_MECIS BIT(0) +#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8 +#define XGMAC_RPCEIE BIT(12) +#define XGMAC_ECEIE BIT(8) +#define XGMAC_RXCEIE BIT(4) +#define XGMAC_TXCEIE BIT(0) +#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc +#define XGMAC_MTL_DPP_CONTROL 0x000010e0 +#define XGMAC_DPP_DISABLE BIT(0) +#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) +#define XGMAC_TQS GENMASK(25, 16) +#define XGMAC_TQS_SHIFT 16 +#define XGMAC_Q2TCMAP GENMASK(10, 8) +#define XGMAC_Q2TCMAP_SHIFT 8 +#define XGMAC_TTC GENMASK(6, 4) +#define XGMAC_TTC_SHIFT 4 +#define XGMAC_TXQEN GENMASK(3, 2) +#define XGMAC_TXQEN_SHIFT 2 +#define XGMAC_TSF BIT(1) +#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x))) +#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x))) +#define 
XGMAC_MTL_TCx_SENDSLOPE(x) (0x0000111c + (0x80 * (x))) +#define XGMAC_MTL_TCx_HICREDIT(x) (0x00001120 + (0x80 * (x))) +#define XGMAC_MTL_TCx_LOCREDIT(x) (0x00001124 + (0x80 * (x))) +#define XGMAC_CC BIT(3) +#define XGMAC_TSA GENMASK(1, 0) +#define XGMAC_SP (0x0 << 0) +#define XGMAC_CBS (0x1 << 0) +#define XGMAC_ETS (0x2 << 0) +#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x))) +#define XGMAC_RQS GENMASK(25, 16) +#define XGMAC_RQS_SHIFT 16 +#define XGMAC_EHFC BIT(7) +#define XGMAC_RSF BIT(5) +#define XGMAC_RTC GENMASK(1, 0) +#define XGMAC_RTC_SHIFT 0 +#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x))) +#define XGMAC_RFD GENMASK(31, 17) +#define XGMAC_RFD_SHIFT 17 +#define XGMAC_RFA GENMASK(15, 1) +#define XGMAC_RFA_SHIFT 1 +#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x))) +#define XGMAC_RXOIE BIT(16) +#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x))) +#define XGMAC_RXOVFIS BIT(16) +#define XGMAC_ABPSIS BIT(1) +#define XGMAC_TXUNFIS BIT(0) +#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4) + +/* DMA Registers */ +#define XGMAC_DMA_MODE 0x00003000 +#define XGMAC_SWR BIT(0) +#define XGMAC_DMA_SYSBUS_MODE 0x00003004 +#define XGMAC_WR_OSR_LMT GENMASK(29, 24) +#define XGMAC_WR_OSR_LMT_SHIFT 24 +#define XGMAC_RD_OSR_LMT GENMASK(21, 16) +#define XGMAC_RD_OSR_LMT_SHIFT 16 +#define XGMAC_EN_LPI BIT(15) +#define XGMAC_LPI_XIT_PKT BIT(14) +#define XGMAC_AAL BIT(12) +#define XGMAC_EAME BIT(11) +#define XGMAC_BLEN GENMASK(7, 1) +#define XGMAC_BLEN256 BIT(7) +#define XGMAC_BLEN128 BIT(6) +#define XGMAC_BLEN64 BIT(5) +#define XGMAC_BLEN32 BIT(4) +#define XGMAC_BLEN16 BIT(3) +#define XGMAC_BLEN8 BIT(2) +#define XGMAC_BLEN4 BIT(1) +#define XGMAC_UNDEF BIT(0) +#define XGMAC_TX_EDMA_CTRL 0x00003040 +#define XGMAC_TDPS GENMASK(29, 0) +#define XGMAC_RX_EDMA_CTRL 0x00003044 +#define XGMAC_RDPS GENMASK(29, 0) +#define XGMAC_DMA_TBS_CTRL0 0x00003054 +#define XGMAC_DMA_TBS_CTRL1 0x00003058 +#define XGMAC_DMA_TBS_CTRL2 0x0000305c +#define XGMAC_DMA_TBS_CTRL3 0x00003060 +#define XGMAC_FTOS GENMASK(31, 8) +#define XGMAC_FTOV BIT(0) +#define XGMAC_DEF_FTOS (XGMAC_FTOS | XGMAC_FTOV) +#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064 +#define XGMAC_MCSIS BIT(31) +#define XGMAC_MSUIS BIT(29) +#define XGMAC_MSCIS BIT(28) +#define XGMAC_DEUIS BIT(1) +#define XGMAC_DECIS BIT(0) +#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068 +#define XGMAC_DCEIE BIT(1) +#define XGMAC_TCEIE BIT(0) +#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c +#define XGMAC_DMA_DPP_INT_STATUS 0x00003074 +#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) +#define XGMAC_SPH BIT(24) +#define XGMAC_PBLx8 BIT(16) +#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x))) +#define XGMAC_EDSE BIT(28) +#define XGMAC_TxPBL GENMASK(21, 16) +#define XGMAC_TxPBL_SHIFT 16 +#define XGMAC_TSE BIT(12) +#define XGMAC_OSP BIT(4) +#define XGMAC_TXST BIT(0) +#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x))) +#define XGMAC_RxPBL GENMASK(21, 16) +#define XGMAC_RxPBL_SHIFT 16 +#define XGMAC_RBSZ GENMASK(14, 1) +#define XGMAC_RBSZ_SHIFT 1 +#define XGMAC_RXST BIT(0) +#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_HADDR(x) (0x00003118 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_RING_LEN(x) (0x00003130 
+ (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_RING_LEN(x) (0x00003134 + (0x80 * (x))) +#define XGMAC_DMA_CH_INT_EN(x) (0x00003138 + (0x80 * (x))) +#define XGMAC_NIE BIT(15) +#define XGMAC_AIE BIT(14) +#define XGMAC_RBUE BIT(7) +#define XGMAC_RIE BIT(6) +#define XGMAC_TBUE BIT(2) +#define XGMAC_TIE BIT(0) +#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \ + XGMAC_RIE | XGMAC_TIE) +#define XGMAC_DMA_INT_DEFAULT_RX (XGMAC_RBUE | XGMAC_RIE) +#define XGMAC_DMA_INT_DEFAULT_TX (XGMAC_TIE) +#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x))) +#define XGMAC_RWT GENMASK(7, 0) +#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x))) +#define XGMAC_NIS BIT(15) +#define XGMAC_AIS BIT(14) +#define XGMAC_FBE BIT(12) +#define XGMAC_RBU BIT(7) +#define XGMAC_RI BIT(6) +#define XGMAC_TBU BIT(2) +#define XGMAC_TPS BIT(1) +#define XGMAC_TI BIT(0) +#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4) + +#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE) +#define XGMAC_DMA_STATUS_MSK_RX (XGMAC_RBU | XGMAC_RI | \ + XGMAC_DMA_STATUS_MSK_COMMON) +#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \ + XGMAC_DMA_STATUS_MSK_COMMON) + +/* Descriptors */ +#define XGMAC_TDES0_LTV BIT(31) +#define XGMAC_TDES0_LT GENMASK(7, 0) +#define XGMAC_TDES1_LT GENMASK(31, 8) +#define XGMAC_TDES2_IVT GENMASK(31, 16) +#define XGMAC_TDES2_IVT_SHIFT 16 +#define XGMAC_TDES2_IOC BIT(31) +#define XGMAC_TDES2_TTSE BIT(30) +#define XGMAC_TDES2_B2L GENMASK(29, 16) +#define XGMAC_TDES2_B2L_SHIFT 16 +#define XGMAC_TDES2_VTIR GENMASK(15, 14) +#define XGMAC_TDES2_VTIR_SHIFT 14 +#define XGMAC_TDES2_B1L GENMASK(13, 0) +#define XGMAC_TDES3_OWN BIT(31) +#define XGMAC_TDES3_CTXT BIT(30) +#define XGMAC_TDES3_FD BIT(29) +#define XGMAC_TDES3_LD BIT(28) +#define XGMAC_TDES3_CPC GENMASK(27, 26) +#define XGMAC_TDES3_CPC_SHIFT 26 +#define XGMAC_TDES3_TCMSSV BIT(26) +#define XGMAC_TDES3_SAIC GENMASK(25, 23) +#define XGMAC_TDES3_SAIC_SHIFT 23 +#define XGMAC_TDES3_TBSV BIT(24) +#define XGMAC_TDES3_THL GENMASK(22, 19) +#define XGMAC_TDES3_THL_SHIFT 19 +#define XGMAC_TDES3_IVTIR GENMASK(19, 18) +#define XGMAC_TDES3_IVTIR_SHIFT 18 +#define XGMAC_TDES3_TSE BIT(18) +#define XGMAC_TDES3_IVLTV BIT(17) +#define XGMAC_TDES3_CIC GENMASK(17, 16) +#define XGMAC_TDES3_CIC_SHIFT 16 +#define XGMAC_TDES3_TPL GENMASK(17, 0) +#define XGMAC_TDES3_VLTV BIT(16) +#define XGMAC_TDES3_VT GENMASK(15, 0) +#define XGMAC_TDES3_FL GENMASK(14, 0) +#define XGMAC_RDES2_HL GENMASK(9, 0) +#define XGMAC_RDES3_OWN BIT(31) +#define XGMAC_RDES3_CTXT BIT(30) +#define XGMAC_RDES3_IOC BIT(30) +#define XGMAC_RDES3_LD BIT(28) +#define XGMAC_RDES3_CDA BIT(27) +#define XGMAC_RDES3_RSV BIT(26) +#define XGMAC_RDES3_L34T GENMASK(23, 20) +#define XGMAC_RDES3_L34T_SHIFT 20 +#define XGMAC_L34T_IP4TCP 0x1 +#define XGMAC_L34T_IP4UDP 0x2 +#define XGMAC_L34T_IP6TCP 0x9 +#define XGMAC_L34T_IP6UDP 0xA +#define XGMAC_RDES3_ES BIT(15) +#define XGMAC_RDES3_PL GENMASK(13, 0) +#define XGMAC_RDES3_TSD BIT(6) +#define XGMAC_RDES3_TSA BIT(4) + +#endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/devices/stmmac/dwxgmac2-6.1-orig.h b/devices/stmmac/dwxgmac2-6.1-orig.h new file mode 100644 index 00000000..8748c37e --- /dev/null +++ b/devices/stmmac/dwxgmac2-6.1-orig.h @@ -0,0 +1,477 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC definitions. 
+ */ + +#ifndef __STMMAC_DWXGMAC2_H__ +#define __STMMAC_DWXGMAC2_H__ + +#include "common.h" + +/* Misc */ +#define XGMAC_JUMBO_LEN 16368 + +/* MAC Registers */ +#define XGMAC_TX_CONFIG 0x00000000 +#define XGMAC_CONFIG_SS_OFF 29 +#define XGMAC_CONFIG_SS_MASK GENMASK(31, 29) +#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_2500_GMII (0x2 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_1000_GMII (0x3 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_100_MII (0x4 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_5000 (0x5 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_2500 (0x6 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SS_10_MII (0x7 << XGMAC_CONFIG_SS_OFF) +#define XGMAC_CONFIG_SARC GENMASK(22, 20) +#define XGMAC_CONFIG_SARC_SHIFT 20 +#define XGMAC_CONFIG_JD BIT(16) +#define XGMAC_CONFIG_TE BIT(0) +#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD) +#define XGMAC_RX_CONFIG 0x00000004 +#define XGMAC_CONFIG_ARPEN BIT(31) +#define XGMAC_CONFIG_GPSL GENMASK(29, 16) +#define XGMAC_CONFIG_GPSL_SHIFT 16 +#define XGMAC_CONFIG_HDSMS GENMASK(14, 12) +#define XGMAC_CONFIG_HDSMS_SHIFT 12 +#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT) +#define XGMAC_CONFIG_S2KP BIT(11) +#define XGMAC_CONFIG_LM BIT(10) +#define XGMAC_CONFIG_IPC BIT(9) +#define XGMAC_CONFIG_JE BIT(8) +#define XGMAC_CONFIG_WD BIT(7) +#define XGMAC_CONFIG_GPSLCE BIT(6) +#define XGMAC_CONFIG_CST BIT(2) +#define XGMAC_CONFIG_ACS BIT(1) +#define XGMAC_CONFIG_RE BIT(0) +#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \ + (XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT)) +#define XGMAC_PACKET_FILTER 0x00000008 +#define XGMAC_FILTER_RA BIT(31) +#define XGMAC_FILTER_IPFE BIT(20) +#define XGMAC_FILTER_VTFE BIT(16) +#define XGMAC_FILTER_HPF BIT(10) +#define XGMAC_FILTER_PCF BIT(7) +#define XGMAC_FILTER_PM BIT(4) +#define XGMAC_FILTER_HMC BIT(2) +#define XGMAC_FILTER_PR BIT(0) +#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) +#define XGMAC_MAX_HASH_TABLE 8 +#define XGMAC_VLAN_TAG 0x00000050 +#define XGMAC_VLAN_EDVLP BIT(26) +#define XGMAC_VLAN_VTHM BIT(25) +#define XGMAC_VLAN_DOVLTC BIT(20) +#define XGMAC_VLAN_ESVL BIT(18) +#define XGMAC_VLAN_ETV BIT(16) +#define XGMAC_VLAN_VID GENMASK(15, 0) +#define XGMAC_VLAN_HASH_TABLE 0x00000058 +#define XGMAC_VLAN_INCL 0x00000060 +#define XGMAC_VLAN_VLTI BIT(20) +#define XGMAC_VLAN_CSVL BIT(19) +#define XGMAC_VLAN_VLC GENMASK(17, 16) +#define XGMAC_VLAN_VLC_SHIFT 16 +#define XGMAC_RXQ_CTRL0 0x000000a0 +#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) +#define XGMAC_RXQEN_SHIFT(x) ((x) * 2) +#define XGMAC_RXQ_CTRL1 0x000000a4 +#define XGMAC_RQ GENMASK(7, 4) +#define XGMAC_RQ_SHIFT 4 +#define XGMAC_RXQ_CTRL2 0x000000a8 +#define XGMAC_RXQ_CTRL3 0x000000ac +#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_PSRQ_SHIFT(x) ((x) * 8) +#define XGMAC_INT_STATUS 0x000000b0 +#define XGMAC_LPIIS BIT(5) +#define XGMAC_PMTIS BIT(4) +#define XGMAC_INT_EN 0x000000b4 +#define XGMAC_TSIE BIT(12) +#define XGMAC_LPIIE BIT(5) +#define XGMAC_PMTIE BIT(4) +#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE) +#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4) +#define XGMAC_PT GENMASK(31, 16) +#define XGMAC_PT_SHIFT 16 +#define XGMAC_TFE BIT(1) +#define XGMAC_RX_FLOW_CTRL 0x00000090 +#define XGMAC_RFE BIT(0) +#define XGMAC_PMT 0x000000c0 +#define XGMAC_GLBLUCAST BIT(9) +#define XGMAC_RWKPKTEN BIT(2) +#define XGMAC_MGKPKTEN BIT(1) +#define XGMAC_PWRDWN BIT(0) +#define XGMAC_LPI_CTRL 0x000000d0 +#define XGMAC_TXCGE BIT(21) +#define 
XGMAC_LPITXA BIT(19) +#define XGMAC_PLS BIT(17) +#define XGMAC_LPITXEN BIT(16) +#define XGMAC_RLPIEX BIT(3) +#define XGMAC_RLPIEN BIT(2) +#define XGMAC_TLPIEX BIT(1) +#define XGMAC_TLPIEN BIT(0) +#define XGMAC_LPI_TIMER_CTRL 0x000000d4 +#define XGMAC_HW_FEATURE0 0x0000011c +#define XGMAC_HWFEAT_SAVLANINS BIT(27) +#define XGMAC_HWFEAT_RXCOESEL BIT(16) +#define XGMAC_HWFEAT_TXCOESEL BIT(14) +#define XGMAC_HWFEAT_EEESEL BIT(13) +#define XGMAC_HWFEAT_TSSEL BIT(12) +#define XGMAC_HWFEAT_AVSEL BIT(11) +#define XGMAC_HWFEAT_RAVSEL BIT(10) +#define XGMAC_HWFEAT_ARPOFFSEL BIT(9) +#define XGMAC_HWFEAT_MMCSEL BIT(8) +#define XGMAC_HWFEAT_MGKSEL BIT(7) +#define XGMAC_HWFEAT_RWKSEL BIT(6) +#define XGMAC_HWFEAT_VLHASH BIT(4) +#define XGMAC_HWFEAT_GMIISEL BIT(1) +#define XGMAC_HW_FEATURE1 0x00000120 +#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27) +#define XGMAC_HWFEAT_HASHTBLSZ GENMASK(25, 24) +#define XGMAC_HWFEAT_RSSEN BIT(20) +#define XGMAC_HWFEAT_TSOEN BIT(18) +#define XGMAC_HWFEAT_SPHEN BIT(17) +#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14) +#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6) +#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0) +#define XGMAC_HW_FEATURE2 0x00000124 +#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24) +#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18) +#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12) +#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6) +#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0) +#define XGMAC_HW_FEATURE3 0x00000128 +#define XGMAC_HWFEAT_TBSSEL BIT(27) +#define XGMAC_HWFEAT_FPESEL BIT(26) +#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23) +#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20) +#define XGMAC_HWFEAT_ESTSEL BIT(19) +#define XGMAC_HWFEAT_ASP GENMASK(15, 14) +#define XGMAC_HWFEAT_DVLAN BIT(13) +#define XGMAC_HWFEAT_FRPES GENMASK(12, 11) +#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9) +#define XGMAC_HWFEAT_FRPSEL BIT(3) +#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150 +#define XGMAC_MAC_FSM_CONTROL 0x00000158 +#define XGMAC_PRTYEN BIT(1) +#define XGMAC_TMOUTEN BIT(0) +#define XGMAC_MDIO_ADDR 0x00000200 +#define XGMAC_MDIO_DATA 0x00000204 +#define XGMAC_MDIO_C22P 0x00000220 +#define XGMAC_FPE_CTRL_STS 0x00000280 +#define XGMAC_EFPE BIT(0) +#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8) +#define XGMAC_ADDR_MAX 32 +#define XGMAC_AE BIT(31) +#define XGMAC_DCS GENMASK(19, 16) +#define XGMAC_DCS_SHIFT 16 +#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8) +#define XGMAC_L3L4_ADDR_CTRL 0x00000c00 +#define XGMAC_IDDR GENMASK(15, 8) +#define XGMAC_IDDR_SHIFT 8 +#define XGMAC_IDDR_FNUM 4 +#define XGMAC_TT BIT(1) +#define XGMAC_XB BIT(0) +#define XGMAC_L3L4_DATA 0x00000c04 +#define XGMAC_L3L4_CTRL 0x0 +#define XGMAC_L4DPIM0 BIT(21) +#define XGMAC_L4DPM0 BIT(20) +#define XGMAC_L4SPIM0 BIT(19) +#define XGMAC_L4SPM0 BIT(18) +#define XGMAC_L4PEN0 BIT(16) +#define XGMAC_L3HDBM0 GENMASK(15, 11) +#define XGMAC_L3HSBM0 GENMASK(10, 6) +#define XGMAC_L3DAIM0 BIT(5) +#define XGMAC_L3DAM0 BIT(4) +#define XGMAC_L3SAIM0 BIT(3) +#define XGMAC_L3SAM0 BIT(2) +#define XGMAC_L3PEN0 BIT(0) +#define XGMAC_L4_ADDR 0x1 +#define XGMAC_L4DP0 GENMASK(31, 16) +#define XGMAC_L4DP0_SHIFT 16 +#define XGMAC_L4SP0 GENMASK(15, 0) +#define XGMAC_L3_ADDR0 0x4 +#define XGMAC_L3_ADDR1 0x5 +#define XGMAC_L3_ADDR2 0x6 +#define XMGAC_L3_ADDR3 0x7 +#define XGMAC_ARP_ADDR 0x00000c10 +#define XGMAC_RSS_CTRL 0x00000c80 +#define XGMAC_UDP4TE BIT(3) +#define XGMAC_TCP4TE BIT(2) +#define XGMAC_IP2TE BIT(1) +#define XGMAC_RSSE BIT(0) +#define XGMAC_RSS_ADDR 0x00000c88 +#define XGMAC_RSSIA_SHIFT 8 +#define XGMAC_ADDRT BIT(2) +#define XGMAC_CT BIT(1) 
+#define XGMAC_OB BIT(0) +#define XGMAC_RSS_DATA 0x00000c8c +#define XGMAC_TIMESTAMP_STATUS 0x00000d20 +#define XGMAC_TXTSC BIT(15) +#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30 +#define XGMAC_TXTSSTSLO GENMASK(30, 0) +#define XGMAC_TXTIMESTAMP_SEC 0x00000d34 +#define XGMAC_PPS_CONTROL 0x00000d70 +#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define XGMAC_PPS_MINIDX(x) ((x) * 8) +#define XGMAC_PPSx_MASK(x) \ + GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x)) +#define XGMAC_TRGTMODSELx(x, val) \ + GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \ + ((val) << (XGMAC_PPS_MAXIDX(x) - 2)) +#define XGMAC_PPSCMDx(x, val) \ + GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \ + ((val) << XGMAC_PPS_MINIDX(x)) +#define XGMAC_PPSCMD_START 0x2 +#define XGMAC_PPSCMD_STOP 0x5 +#define XGMAC_PPSENx(x) BIT(4 + (x) * 8) +#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10) +#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10) +#define XGMAC_TRGTBUSY0 BIT(31) +#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10) +#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10) + +/* MTL Registers */ +#define XGMAC_MTL_OPMODE 0x00001000 +#define XGMAC_FRPE BIT(15) +#define XGMAC_ETSALG GENMASK(6, 5) +#define XGMAC_WRR (0x0 << 5) +#define XGMAC_WFQ (0x1 << 5) +#define XGMAC_DWRR (0x2 << 5) +#define XGMAC_RAA BIT(2) +#define XGMAC_MTL_INT_STATUS 0x00001020 +#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030 +#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034 +#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8) +#define XGMAC_QDDMACH BIT(7) +#define XGMAC_TC_PRTY_MAP0 0x00001040 +#define XGMAC_TC_PRTY_MAP1 0x00001044 +#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8) +#define XGMAC_PSTC_SHIFT(x) ((x) * 8) +#define XGMAC_MTL_EST_CONTROL 0x00001050 +#define XGMAC_PTOV GENMASK(31, 23) +#define XGMAC_PTOV_SHIFT 23 +#define XGMAC_SSWL BIT(1) +#define XGMAC_EEST BIT(0) +#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080 +#define XGMAC_BTR_LOW 0x0 +#define XGMAC_BTR_HIGH 0x1 +#define XGMAC_CTR_LOW 0x2 +#define XGMAC_CTR_HIGH 0x3 +#define XGMAC_TER 0x4 +#define XGMAC_LLR 0x5 +#define XGMAC_ADDR_SHIFT 8 +#define XGMAC_GCRR BIT(2) +#define XGMAC_SRWO BIT(0) +#define XGMAC_MTL_EST_GCL_DATA 0x00001084 +#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0 +#define XGMAC_RXPI BIT(31) +#define XGMAC_NPE GENMASK(23, 16) +#define XGMAC_NVE GENMASK(7, 0) +#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0 +#define XGMAC_STARTBUSY BIT(31) +#define XGMAC_WRRDN BIT(16) +#define XGMAC_ADDR GENMASK(9, 0) +#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4 +#define XGMAC_MTL_ECC_CONTROL 0x000010c0 +#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4 +#define XGMAC_MEUIS BIT(1) +#define XGMAC_MECIS BIT(0) +#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8 +#define XGMAC_RPCEIE BIT(12) +#define XGMAC_ECEIE BIT(8) +#define XGMAC_RXCEIE BIT(4) +#define XGMAC_TXCEIE BIT(0) +#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc +#define XGMAC_MTL_DPP_CONTROL 0x000010e0 +#define XGMAC_DPP_DISABLE BIT(0) +#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) +#define XGMAC_TQS GENMASK(25, 16) +#define XGMAC_TQS_SHIFT 16 +#define XGMAC_Q2TCMAP GENMASK(10, 8) +#define XGMAC_Q2TCMAP_SHIFT 8 +#define XGMAC_TTC GENMASK(6, 4) +#define XGMAC_TTC_SHIFT 4 +#define XGMAC_TXQEN GENMASK(3, 2) +#define XGMAC_TXQEN_SHIFT 2 +#define XGMAC_TSF BIT(1) +#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x))) +#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x))) +#define XGMAC_MTL_TCx_SENDSLOPE(x) 
(0x0000111c + (0x80 * (x))) +#define XGMAC_MTL_TCx_HICREDIT(x) (0x00001120 + (0x80 * (x))) +#define XGMAC_MTL_TCx_LOCREDIT(x) (0x00001124 + (0x80 * (x))) +#define XGMAC_CC BIT(3) +#define XGMAC_TSA GENMASK(1, 0) +#define XGMAC_SP (0x0 << 0) +#define XGMAC_CBS (0x1 << 0) +#define XGMAC_ETS (0x2 << 0) +#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x))) +#define XGMAC_RQS GENMASK(25, 16) +#define XGMAC_RQS_SHIFT 16 +#define XGMAC_EHFC BIT(7) +#define XGMAC_RSF BIT(5) +#define XGMAC_RTC GENMASK(1, 0) +#define XGMAC_RTC_SHIFT 0 +#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x))) +#define XGMAC_RFD GENMASK(31, 17) +#define XGMAC_RFD_SHIFT 17 +#define XGMAC_RFA GENMASK(15, 1) +#define XGMAC_RFA_SHIFT 1 +#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x))) +#define XGMAC_RXOIE BIT(16) +#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x))) +#define XGMAC_RXOVFIS BIT(16) +#define XGMAC_ABPSIS BIT(1) +#define XGMAC_TXUNFIS BIT(0) +#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4) + +/* DMA Registers */ +#define XGMAC_DMA_MODE 0x00003000 +#define XGMAC_SWR BIT(0) +#define XGMAC_DMA_SYSBUS_MODE 0x00003004 +#define XGMAC_WR_OSR_LMT GENMASK(29, 24) +#define XGMAC_WR_OSR_LMT_SHIFT 24 +#define XGMAC_RD_OSR_LMT GENMASK(21, 16) +#define XGMAC_RD_OSR_LMT_SHIFT 16 +#define XGMAC_EN_LPI BIT(15) +#define XGMAC_LPI_XIT_PKT BIT(14) +#define XGMAC_AAL BIT(12) +#define XGMAC_EAME BIT(11) +#define XGMAC_BLEN GENMASK(7, 1) +#define XGMAC_BLEN256 BIT(7) +#define XGMAC_BLEN128 BIT(6) +#define XGMAC_BLEN64 BIT(5) +#define XGMAC_BLEN32 BIT(4) +#define XGMAC_BLEN16 BIT(3) +#define XGMAC_BLEN8 BIT(2) +#define XGMAC_BLEN4 BIT(1) +#define XGMAC_UNDEF BIT(0) +#define XGMAC_TX_EDMA_CTRL 0x00003040 +#define XGMAC_TDPS GENMASK(29, 0) +#define XGMAC_RX_EDMA_CTRL 0x00003044 +#define XGMAC_RDPS GENMASK(29, 0) +#define XGMAC_DMA_TBS_CTRL0 0x00003054 +#define XGMAC_DMA_TBS_CTRL1 0x00003058 +#define XGMAC_DMA_TBS_CTRL2 0x0000305c +#define XGMAC_DMA_TBS_CTRL3 0x00003060 +#define XGMAC_FTOS GENMASK(31, 8) +#define XGMAC_FTOV BIT(0) +#define XGMAC_DEF_FTOS (XGMAC_FTOS | XGMAC_FTOV) +#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064 +#define XGMAC_MCSIS BIT(31) +#define XGMAC_MSUIS BIT(29) +#define XGMAC_MSCIS BIT(28) +#define XGMAC_DEUIS BIT(1) +#define XGMAC_DECIS BIT(0) +#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068 +#define XGMAC_DCEIE BIT(1) +#define XGMAC_TCEIE BIT(0) +#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c +#define XGMAC_DMA_DPP_INT_STATUS 0x00003074 +#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) +#define XGMAC_SPH BIT(24) +#define XGMAC_PBLx8 BIT(16) +#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x))) +#define XGMAC_EDSE BIT(28) +#define XGMAC_TxPBL GENMASK(21, 16) +#define XGMAC_TxPBL_SHIFT 16 +#define XGMAC_TSE BIT(12) +#define XGMAC_OSP BIT(4) +#define XGMAC_TXST BIT(0) +#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x))) +#define XGMAC_RxPBL GENMASK(21, 16) +#define XGMAC_RxPBL_SHIFT 16 +#define XGMAC_RBSZ GENMASK(14, 1) +#define XGMAC_RBSZ_SHIFT 1 +#define XGMAC_RXST BIT(0) +#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_HADDR(x) (0x00003118 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x))) +#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x))) +#define XGMAC_DMA_CH_TxDESC_RING_LEN(x) (0x00003130 + (0x80 * (x))) +#define 
XGMAC_DMA_CH_RxDESC_RING_LEN(x) (0x00003134 + (0x80 * (x))) +#define XGMAC_DMA_CH_INT_EN(x) (0x00003138 + (0x80 * (x))) +#define XGMAC_NIE BIT(15) +#define XGMAC_AIE BIT(14) +#define XGMAC_RBUE BIT(7) +#define XGMAC_RIE BIT(6) +#define XGMAC_TBUE BIT(2) +#define XGMAC_TIE BIT(0) +#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \ + XGMAC_RIE | XGMAC_TIE) +#define XGMAC_DMA_INT_DEFAULT_RX (XGMAC_RBUE | XGMAC_RIE) +#define XGMAC_DMA_INT_DEFAULT_TX (XGMAC_TIE) +#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x))) +#define XGMAC_RWT GENMASK(7, 0) +#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x))) +#define XGMAC_NIS BIT(15) +#define XGMAC_AIS BIT(14) +#define XGMAC_FBE BIT(12) +#define XGMAC_RBU BIT(7) +#define XGMAC_RI BIT(6) +#define XGMAC_TBU BIT(2) +#define XGMAC_TPS BIT(1) +#define XGMAC_TI BIT(0) +#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4) + +#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE) +#define XGMAC_DMA_STATUS_MSK_RX (XGMAC_RBU | XGMAC_RI | \ + XGMAC_DMA_STATUS_MSK_COMMON) +#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \ + XGMAC_DMA_STATUS_MSK_COMMON) + +/* Descriptors */ +#define XGMAC_TDES0_LTV BIT(31) +#define XGMAC_TDES0_LT GENMASK(7, 0) +#define XGMAC_TDES1_LT GENMASK(31, 8) +#define XGMAC_TDES2_IVT GENMASK(31, 16) +#define XGMAC_TDES2_IVT_SHIFT 16 +#define XGMAC_TDES2_IOC BIT(31) +#define XGMAC_TDES2_TTSE BIT(30) +#define XGMAC_TDES2_B2L GENMASK(29, 16) +#define XGMAC_TDES2_B2L_SHIFT 16 +#define XGMAC_TDES2_VTIR GENMASK(15, 14) +#define XGMAC_TDES2_VTIR_SHIFT 14 +#define XGMAC_TDES2_B1L GENMASK(13, 0) +#define XGMAC_TDES3_OWN BIT(31) +#define XGMAC_TDES3_CTXT BIT(30) +#define XGMAC_TDES3_FD BIT(29) +#define XGMAC_TDES3_LD BIT(28) +#define XGMAC_TDES3_CPC GENMASK(27, 26) +#define XGMAC_TDES3_CPC_SHIFT 26 +#define XGMAC_TDES3_TCMSSV BIT(26) +#define XGMAC_TDES3_SAIC GENMASK(25, 23) +#define XGMAC_TDES3_SAIC_SHIFT 23 +#define XGMAC_TDES3_TBSV BIT(24) +#define XGMAC_TDES3_THL GENMASK(22, 19) +#define XGMAC_TDES3_THL_SHIFT 19 +#define XGMAC_TDES3_IVTIR GENMASK(19, 18) +#define XGMAC_TDES3_IVTIR_SHIFT 18 +#define XGMAC_TDES3_TSE BIT(18) +#define XGMAC_TDES3_IVLTV BIT(17) +#define XGMAC_TDES3_CIC GENMASK(17, 16) +#define XGMAC_TDES3_CIC_SHIFT 16 +#define XGMAC_TDES3_TPL GENMASK(17, 0) +#define XGMAC_TDES3_VLTV BIT(16) +#define XGMAC_TDES3_VT GENMASK(15, 0) +#define XGMAC_TDES3_FL GENMASK(14, 0) +#define XGMAC_RDES2_HL GENMASK(9, 0) +#define XGMAC_RDES3_OWN BIT(31) +#define XGMAC_RDES3_CTXT BIT(30) +#define XGMAC_RDES3_IOC BIT(30) +#define XGMAC_RDES3_LD BIT(28) +#define XGMAC_RDES3_CDA BIT(27) +#define XGMAC_RDES3_RSV BIT(26) +#define XGMAC_RDES3_L34T GENMASK(23, 20) +#define XGMAC_RDES3_L34T_SHIFT 20 +#define XGMAC_L34T_IP4TCP 0x1 +#define XGMAC_L34T_IP4UDP 0x2 +#define XGMAC_L34T_IP6TCP 0x9 +#define XGMAC_L34T_IP6UDP 0xA +#define XGMAC_RDES3_ES BIT(15) +#define XGMAC_RDES3_PL GENMASK(13, 0) +#define XGMAC_RDES3_TSD BIT(6) +#define XGMAC_RDES3_TSA BIT(4) + +#endif /* __STMMAC_DWXGMAC2_H__ */ diff --git a/devices/stmmac/dwxgmac2_core-6.1-ethercat.c b/devices/stmmac/dwxgmac2_core-6.1-ethercat.c new file mode 100644 index 00000000..fdb4f83f --- /dev/null +++ b/devices/stmmac/dwxgmac2_core-6.1-ethercat.c @@ -0,0 +1,1705 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. 
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+#include <linux/iopoll.h>
+#include "stmmac-6.1-ethercat.h"
+#include "stmmac_ptp-6.1-ethercat.h"
+#include "dwxlgmac2-6.1-ethercat.h"
+#include "dwxgmac2-6.1-ethercat.h"
+
+static void dwxgmac2_core_init(struct mac_device_info *hw,
+		struct net_device *dev)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 tx, rx;
+
+	tx = readl(ioaddr + XGMAC_TX_CONFIG);
+	rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+	tx |= XGMAC_CORE_INIT_TX;
+	rx |= XGMAC_CORE_INIT_RX;
+
+	if (hw->ps) {
+		tx |= XGMAC_CONFIG_TE;
+		tx &= ~hw->link.speed_mask;
+
+		switch (hw->ps) {
+		case SPEED_10000:
+			tx |= hw->link.xgmii.speed10000;
+			break;
+		case SPEED_2500:
+			tx |= hw->link.speed2500;
+			break;
+		case SPEED_1000:
+		default:
+			tx |= hw->link.speed1000;
+			break;
+		}
+	}
+
+	writel(tx, ioaddr + XGMAC_TX_CONFIG);
+	writel(rx, ioaddr + XGMAC_RX_CONFIG);
+	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+}
+
+static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
+{
+	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
+	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+	if (enable) {
+		tx |= XGMAC_CONFIG_TE;
+		rx |= XGMAC_CONFIG_RE;
+	} else {
+		tx &= ~XGMAC_CONFIG_TE;
+		rx &= ~XGMAC_CONFIG_RE;
+	}
+
+	writel(tx, ioaddr + XGMAC_TX_CONFIG);
+	writel(rx, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value;
+
+	value = readl(ioaddr + XGMAC_RX_CONFIG);
+	if (hw->rx_csum)
+		value |= XGMAC_CONFIG_IPC;
+	else
+		value &= ~XGMAC_CONFIG_IPC;
+	writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
+}
+
+static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+		u32 queue)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value;
+
+	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
+	if (mode == MTL_QUEUE_AVB)
+		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+	else if (mode == MTL_QUEUE_DCB)
+		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
+}
+
+static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+		u32 queue)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value, reg;
+
+	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+	if (queue >= 4)
+		queue -= 4;
+
+	value = readl(ioaddr + reg);
+	value &= ~XGMAC_PSRQ(queue);
+	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+
+	writel(value, ioaddr + reg);
+}
+
+static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+		u32 queue)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value, reg;
+
+	reg = (queue < 4) ?
XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + reg); + value &= ~XGMAC_PSTC(queue); + value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue); + + writel(value, ioaddr + reg); +} + +static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw, + u32 rx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_MTL_OPMODE); + value &= ~XGMAC_RAA; + + switch (rx_alg) { + case MTL_RX_ALGORITHM_SP: + break; + case MTL_RX_ALGORITHM_WSP: + value |= XGMAC_RAA; + break; + default: + break; + } + + writel(value, ioaddr + XGMAC_MTL_OPMODE); +} + +static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw, + u32 tx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + bool ets = true; + u32 value; + int i; + + value = readl(ioaddr + XGMAC_MTL_OPMODE); + value &= ~XGMAC_ETSALG; + + switch (tx_alg) { + case MTL_TX_ALGORITHM_WRR: + value |= XGMAC_WRR; + break; + case MTL_TX_ALGORITHM_WFQ: + value |= XGMAC_WFQ; + break; + case MTL_TX_ALGORITHM_DWRR: + value |= XGMAC_DWRR; + break; + default: + ets = false; + break; + } + + writel(value, ioaddr + XGMAC_MTL_OPMODE); + + /* Set ETS if desired */ + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i)); + value &= ~XGMAC_TSA; + if (ets) + value |= XGMAC_ETS; + writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i)); + } +} + +static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw, + u32 weight, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); +} + +static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, + u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, reg; + + reg = (queue < 4) ? 
XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + reg); + value &= ~XGMAC_QxMDMACH(queue); + value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue); + + writel(value, ioaddr + reg); +} + +static void dwxgmac2_config_cbs(struct mac_device_info *hw, + u32 send_slope, u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue)); + writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); + writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue)); + writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); + + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); + value &= ~XGMAC_TSA; + value |= XGMAC_CC | XGMAC_CBS; + writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); +} + +static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < XGMAC_MAC_REGSIZE; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static int dwxgmac2_host_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 stat, en; + int ret = 0; + + en = readl(ioaddr + XGMAC_INT_EN); + stat = readl(ioaddr + XGMAC_INT_STATUS); + + stat &= en; + + if (stat & XGMAC_PMTIS) { + x->irq_receive_pmt_irq_n++; + readl(ioaddr + XGMAC_PMT); + } + + if (stat & XGMAC_LPIIS) { + u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL); + + if (lpi & XGMAC_TLPIEN) { + ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; + x->irq_tx_path_in_lpi_mode_n++; + } + if (lpi & XGMAC_TLPIEX) { + ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; + x->irq_tx_path_exit_lpi_mode_n++; + } + if (lpi & XGMAC_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (lpi & XGMAC_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + return ret; +} + +static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + int ret = 0; + u32 status; + + status = readl(ioaddr + XGMAC_MTL_INT_STATUS); + if (status & BIT(chan)) { + u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan)); + + if (chan_status & XGMAC_RXOVFIS) + ret |= CORE_IRQ_MTL_RX_OVERFLOW; + + writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan)); + } + + return ret; +} + +static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + u32 i; + + if (fc & FLOW_RX) + writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL); + if (fc & FLOW_TX) { + for (i = 0; i < tx_cnt; i++) { + u32 value = XGMAC_TFE; + + if (duplex) + value |= pause_time << XGMAC_PT_SHIFT; + + writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i)); + } + } +} + +static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + u32 val = 0x0; + + if (mode & WAKE_MAGIC) + val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN; + if (mode & WAKE_UCAST) + val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN; + if (val) { + u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG); + cfg |= XGMAC_CONFIG_RE; + writel(cfg, ioaddr + XGMAC_RX_CONFIG); + } + + writel(val, ioaddr + XGMAC_PMT); +} + +static void dwxgmac2_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = (addr[5] << 8) | addr[4]; + writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n)); + + value = (addr[3] << 24) | 
(addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n)); +} + +static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + u32 hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n)); + lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n)); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} + +static void dwxgmac2_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + + value |= XGMAC_LPITXEN | XGMAC_LPITXA; + if (en_tx_lpi_clockgating) + value |= XGMAC_TXCGE; + + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE); + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + if (link) + value |= XGMAC_PLS; + else + value &= ~XGMAC_PLS; + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = (tw & 0xffff) | ((ls & 0x3ff) << 16); + writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL); +} + +static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, + int mcbitslog2) +{ + int numhashregs, regs; + + switch (mcbitslog2) { + case 6: + numhashregs = 2; + break; + case 7: + numhashregs = 4; + break; + case 8: + numhashregs = 8; + break; + default: + return; + } + + for (regs = 0; regs < numhashregs; regs++) + writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs)); +} + +static void dwxgmac2_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + int mcbitslog2 = hw->mcast_bits_log2; + u32 mc_filter[8]; + int i; + + value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM); + value |= XGMAC_FILTER_HPF; + + memset(mc_filter, 0, sizeof(mc_filter)); + + if (dev->flags & IFF_PROMISC) { + value |= XGMAC_FILTER_PR; + value |= XGMAC_FILTER_PCF; + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > hw->multicast_filter_bins)) { + value |= XGMAC_FILTER_PM; + + for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++) + writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i)); + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { + struct netdev_hw_addr *ha; + + value |= XGMAC_FILTER_HMC; + + netdev_for_each_mc_addr(ha, dev) { + u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> + (32 - mcbitslog2)); + mc_filter[nr >> 5] |= (1 << (nr & 0x1F)); + } + } + + dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2); + + /* Handle multiple unicast addresses */ + if (netdev_uc_count(dev) > hw->unicast_filter_entries) { + value |= XGMAC_FILTER_PR; + } else { + struct netdev_hw_addr *ha; + int reg = 1; + + netdev_for_each_uc_addr(ha, dev) { + dwxgmac2_set_umac_addr(hw, ha->addr, reg); + reg++; + 
} + + for ( ; reg < XGMAC_ADDR_MAX; reg++) { + writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg)); + writel(0, ioaddr + XGMAC_ADDRx_LOW(reg)); + } + } + + writel(value, ioaddr + XGMAC_PACKET_FILTER); +} + +static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + XGMAC_RX_CONFIG); + + if (enable) + value |= XGMAC_CONFIG_LM; + else + value &= ~XGMAC_CONFIG_LM; + + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx, + u32 val) +{ + u32 ctrl = 0; + + writel(val, ioaddr + XGMAC_RSS_DATA); + ctrl |= idx << XGMAC_RSSIA_SHIFT; + ctrl |= is_key ? XGMAC_ADDRT : 0x0; + ctrl |= XGMAC_OB; + writel(ctrl, ioaddr + XGMAC_RSS_ADDR); + + return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl, + !(ctrl & XGMAC_OB), 100, 10000); +} + +static int dwxgmac2_rss_configure(struct mac_device_info *hw, + struct stmmac_rss *cfg, u32 num_rxq) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, *key; + int i, ret; + + value = readl(ioaddr + XGMAC_RSS_CTRL); + if (!cfg || !cfg->enable) { + value &= ~XGMAC_RSSE; + writel(value, ioaddr + XGMAC_RSS_CTRL); + return 0; + } + + key = (u32 *)cfg->key; + for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) { + ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]); + if (ret) + return ret; + } + + for (i = 0; i < ARRAY_SIZE(cfg->table); i++) { + ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]); + if (ret) + return ret; + } + + for (i = 0; i < num_rxq; i++) + dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH); + + value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE; + writel(value, ioaddr + XGMAC_RSS_CTRL); + return 0; +} + +static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, + __le16 perfect_match, bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE); + + if (hash) { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value |= XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; + if (is_double) { + value |= XGMAC_VLAN_EDVLP; + value |= XGMAC_VLAN_ESVL; + value |= XGMAC_VLAN_DOVLTC; + } else { + value &= ~XGMAC_VLAN_EDVLP; + value &= ~XGMAC_VLAN_ESVL; + value &= ~XGMAC_VLAN_DOVLTC; + } + + value &= ~XGMAC_VLAN_VID; + writel(value, ioaddr + XGMAC_VLAN_TAG); + } else if (perfect_match) { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value |= XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value &= ~XGMAC_VLAN_VTHM; + value |= XGMAC_VLAN_ETV; + if (is_double) { + value |= XGMAC_VLAN_EDVLP; + value |= XGMAC_VLAN_ESVL; + value |= XGMAC_VLAN_DOVLTC; + } else { + value &= ~XGMAC_VLAN_EDVLP; + value &= ~XGMAC_VLAN_ESVL; + value &= ~XGMAC_VLAN_DOVLTC; + } + + value &= ~XGMAC_VLAN_VID; + writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG); + } else { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value &= ~XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV); + value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL); + value &= ~XGMAC_VLAN_DOVLTC; + value &= ~XGMAC_VLAN_VID; + + writel(value, ioaddr + XGMAC_VLAN_TAG); + } +} + +struct dwxgmac3_error_desc { + bool valid; + const char *desc; + const char *detailed_desc; +}; + +#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, 
field) + +static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr, + const char *module_name, + const struct dwxgmac3_error_desc *desc, + unsigned long field_offset, + struct stmmac_safety_stats *stats) +{ + unsigned long loc, mask; + u8 *bptr = (u8 *)stats; + unsigned long *ptr; + + ptr = (unsigned long *)(bptr + field_offset); + + mask = value; + for_each_set_bit(loc, &mask, 32) { + netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ? + "correctable" : "uncorrectable", module_name, + desc[loc].desc, desc[loc].detailed_desc); + + /* Update counters */ + ptr[loc]++; + } +} + +static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= { + { true, "ATPES", "Application Transmit Interface Parity Check Error" }, + { true, "DPES", "Descriptor Cache Data Path Parity Check Error" }, + { true, "TPES", "TSO Data Path Parity Check Error" }, + { true, "TSOPES", "TSO Header Data Path Parity Check Error" }, + { true, "MTPES", "MTL Data Path Parity Check Error" }, + { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" }, + { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" }, + { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" }, + { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" }, + { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" }, + { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" }, + { true, "CWPES", "CSR Write Data Path Parity Check Error" }, + { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" }, + { true, "TTES", "TX FSM Timeout Error" }, + { true, "RTES", "RX FSM Timeout Error" }, + { true, "CTES", "CSR FSM Timeout Error" }, + { true, "ATES", "APP FSM Timeout Error" }, + { true, "PTES", "PTP FSM Timeout Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { true, "MSTTES", "Master Read/Write Timeout Error" }, + { true, "SLVTES", "Slave Read/Write Timeout Error" }, + { true, "ATITES", "Application Timeout on ATI Interface Error" }, + { true, "ARITES", "Application Timeout on ARI Interface Error" }, + { true, "FSMPES", "FSM State Parity Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { true, "CPI", "Control Register Parity Check Error" }, +}; + +static void dwxgmac3_handle_mac_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS); + writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "MAC", + dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats); +} + +static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= { + { true, "TXCES", "MTL TX Memory Error" }, + { true, "TXAMS", "MTL TX Memory Address Mismatch Error" }, + { true, "TXUES", "MTL TX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "RXCES", "MTL RX Memory Error" }, + { true, "RXAMS", "MTL RX Memory Address Mismatch Error" }, + { true, "RXUES", "MTL RX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { true, "ECES", "MTL EST Memory Error" }, + { true, "EAMS", "MTL EST Memory Address Mismatch Error" }, + { true, "EUES", "MTL EST Memory Error" }, + { false, "UNKNOWN", "Unknown Error" 
}, /* 11 */ + { true, "RPCES", "MTL RX Parser Memory Error" }, + { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" }, + { true, "RPUES", "MTL RX Parser Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwxgmac3_handle_mtl_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS); + writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "MTL", + dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats); +} + +static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { + { true, "TCES", "DMA TSO Memory Error" }, + { true, "TAMS", "DMA TSO Memory Address Mismatch Error" }, + { true, "TUES", "DMA TSO Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "DCES", "DMA DCACHE Memory Error" }, + { true, "DAMS", "DMA DCACHE Address Mismatch Error" }, + { true, "DUES", "DMA DCACHE Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { false, "UNKNOWN", "Unknown Error" }, /* 8 */ + { false, "UNKNOWN", "Unknown Error" }, /* 9 */ + { false, "UNKNOWN", "Unknown Error" }, /* 10 */ + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { false, "UNKNOWN", "Unknown Error" }, /* 12 */ + { false, "UNKNOWN", "Unknown Error" }, /* 13 */ + { false, "UNKNOWN", "Unknown Error" }, /* 14 */ + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" +#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" + +static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { + { true, "TDPES0", DPP_TX_ERR }, + { true, "TDPES1", DPP_TX_ERR }, + { true, "TDPES2", DPP_TX_ERR }, + { true, "TDPES3", DPP_TX_ERR }, + { true, "TDPES4", DPP_TX_ERR }, + { true, "TDPES5", DPP_TX_ERR }, + { true, "TDPES6", DPP_TX_ERR }, + { true, "TDPES7", DPP_TX_ERR 
}, + { true, "TDPES8", DPP_TX_ERR }, + { true, "TDPES9", DPP_TX_ERR }, + { true, "TDPES10", DPP_TX_ERR }, + { true, "TDPES11", DPP_TX_ERR }, + { true, "TDPES12", DPP_TX_ERR }, + { true, "TDPES13", DPP_TX_ERR }, + { true, "TDPES14", DPP_TX_ERR }, + { true, "TDPES15", DPP_TX_ERR }, + { true, "RDPES0", DPP_RX_ERR }, + { true, "RDPES1", DPP_RX_ERR }, + { true, "RDPES2", DPP_RX_ERR }, + { true, "RDPES3", DPP_RX_ERR }, + { true, "RDPES4", DPP_RX_ERR }, + { true, "RDPES5", DPP_RX_ERR }, + { true, "RDPES6", DPP_RX_ERR }, + { true, "RDPES7", DPP_RX_ERR }, + { true, "RDPES8", DPP_RX_ERR }, + { true, "RDPES9", DPP_RX_ERR }, + { true, "RDPES10", DPP_RX_ERR }, + { true, "RDPES11", DPP_RX_ERR }, + { true, "RDPES12", DPP_RX_ERR }, + { true, "RDPES13", DPP_RX_ERR }, + { true, "RDPES14", DPP_RX_ERR }, + { true, "RDPES15", DPP_RX_ERR }, +}; + +static void dwxgmac3_handle_dma_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "DMA", + dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats); + + value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS); + + dwxgmac3_log_error(ndev, value, false, "DMA_DPP", + dwxgmac3_dma_dpp_errors, + STAT_OFF(dma_dpp_errors), stats); +} + +static int +dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_cfg) +{ + u32 value; + + if (!asp) + return -EINVAL; + + /* 1. Enable Safety Features */ + writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL); + + /* 2. Enable MTL Safety Interrupts */ + value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE); + value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */ + value |= XGMAC_ECEIE; /* EST Memory Correctable Error */ + value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */ + value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */ + writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE); + + /* 3. Enable DMA Safety Interrupts */ + value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE); + value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */ + value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */ + writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE); + + /* Only ECC Protection for External Memory feature is selected */ + if (asp <= 0x1) + return 0; + + /* 4. Enable Parity and Timeout for FSM */ + value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL); + value |= XGMAC_PRTYEN; /* FSM Parity Feature */ + value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */ + writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL); + + /* 5. 
Enable Data Path Parity Protection */ + value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL); + /* already enabled by default, explicit enable it again */ + value &= ~XGMAC_DPP_DISABLE; + writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL); + + return 0; +} + +static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, + unsigned int asp, + struct stmmac_safety_stats *stats) +{ + bool err, corr; + u32 mtl, dma; + int ret = 0; + + if (!asp) + return -EINVAL; + + mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS); + dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS); + + err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS); + corr = false; + if (err) { + dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) || + (dma & (XGMAC_MSUIS | XGMAC_MSCIS)); + corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS); + if (err) { + dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in + * DMA_Safety_Interrupt_Status, so we handle DMA Data Path + * Parity Errors here + */ + err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS); + corr = dma & XGMAC_DECIS; + if (err) { + dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + return ret; +} + +static const struct dwxgmac3_error { + const struct dwxgmac3_error_desc *desc; +} dwxgmac3_all_errors[] = { + { dwxgmac3_mac_errors }, + { dwxgmac3_mtl_errors }, + { dwxgmac3_dma_errors }, + { dwxgmac3_dma_dpp_errors }, +}; + +static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, + const char **desc) +{ + int module = index / 32, offset = index % 32; + unsigned long *ptr = (unsigned long *)stats; + + if (module >= ARRAY_SIZE(dwxgmac3_all_errors)) + return -EINVAL; + if (!dwxgmac3_all_errors[module].desc[offset].valid) + return -EINVAL; + if (count) + *count = *(ptr + index); + if (desc) + *desc = dwxgmac3_all_errors[module].desc[offset].desc; + return 0; +} + +static int dwxgmac3_rxp_disable(void __iomem *ioaddr) +{ + u32 val = readl(ioaddr + XGMAC_MTL_OPMODE); + + val &= ~XGMAC_FRPE; + writel(val, ioaddr + XGMAC_MTL_OPMODE); + + return 0; +} + +static void dwxgmac3_rxp_enable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + XGMAC_MTL_OPMODE); + val |= XGMAC_FRPE; + writel(val, ioaddr + XGMAC_MTL_OPMODE); +} + +static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr, + struct stmmac_tc_entry *entry, + int pos) +{ + int ret, i; + + for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) { + int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i; + u32 val; + + /* Wait for ready */ + ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST, + val, !(val & XGMAC_STARTBUSY), 1, 10000); + if (ret) + return ret; + + /* Write data */ + val = *((u32 *)&entry->val + i); + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA); + + /* Write pos */ + val = real_pos & XGMAC_ADDR; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Write OP */ + val |= XGMAC_WRRDN; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Start Write */ + val |= XGMAC_STARTBUSY; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Wait for done */ + ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST, + val, !(val & XGMAC_STARTBUSY), 1, 10000); + if (ret) + return ret; + } + + return 0; +} + +static struct stmmac_tc_entry * +dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries, + unsigned int count, u32 curr_prio) +{ + 
struct stmmac_tc_entry *entry; + u32 min_prio = ~0x0; + int i, min_prio_idx; + bool found = false; + + for (i = count - 1; i >= 0; i--) { + entry = &entries[i]; + + /* Do not update unused entries */ + if (!entry->in_use) + continue; + /* Do not update already updated entries (i.e. fragments) */ + if (entry->in_hw) + continue; + /* Let last entry be updated last */ + if (entry->is_last) + continue; + /* Do not return fragments */ + if (entry->is_frag) + continue; + /* Check if we already checked this prio */ + if (entry->prio < curr_prio) + continue; + /* Check if this is the minimum prio */ + if (entry->prio < min_prio) { + min_prio = entry->prio; + min_prio_idx = i; + found = true; + } + } + + if (found) + return &entries[min_prio_idx]; + return NULL; +} + +static int dwxgmac3_rxp_config(void __iomem *ioaddr, + struct stmmac_tc_entry *entries, + unsigned int count) +{ + struct stmmac_tc_entry *entry, *frag; + int i, ret, nve = 0; + u32 curr_prio = 0; + u32 old_val, val; + + /* Force disable RX */ + old_val = readl(ioaddr + XGMAC_RX_CONFIG); + val = old_val & ~XGMAC_CONFIG_RE; + writel(val, ioaddr + XGMAC_RX_CONFIG); + + /* Disable RX Parser */ + ret = dwxgmac3_rxp_disable(ioaddr); + if (ret) + goto re_enable; + + /* Set all entries as NOT in HW */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + entry->in_hw = false; + } + + /* Update entries by reverse order */ + while (1) { + entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio); + if (!entry) + break; + + curr_prio = entry->prio; + frag = entry->frag_ptr; + + /* Set special fragment requirements */ + if (frag) { + entry->val.af = 0; + entry->val.rf = 0; + entry->val.nc = 1; + entry->val.ok_index = nve + 2; + } + + ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + entry->in_hw = true; + + if (frag && !frag->in_hw) { + ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve); + if (ret) + goto re_enable; + frag->table_pos = nve++; + frag->in_hw = true; + } + } + + if (!nve) + goto re_enable; + + /* Update all pass entry */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + if (!entry->is_last) + continue; + + ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + } + + /* Assume n. of parsable entries == n. 
of valid entries */ + val = (nve << 16) & XGMAC_NPE; + val |= nve & XGMAC_NVE; + writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS); + + /* Enable RX Parser */ + dwxgmac3_rxp_enable(ioaddr); + +re_enable: + /* Re-enable RX */ + writel(old_val, ioaddr + XGMAC_RX_CONFIG); + return ret; +} + +static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS, + value, value & XGMAC_TXTSC, 100, 10000)) + return -EBUSY; + + *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO; + *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL; + return 0; +} + +static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags) +{ + u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index)); + u32 val = readl(ioaddr + XGMAC_PPS_CONTROL); + u64 period; + + if (!cfg->available) + return -EINVAL; + if (tnsec & XGMAC_TRGTBUSY0) + return -EBUSY; + if (!sub_second_inc || !systime_flags) + return -EINVAL; + + val &= ~XGMAC_PPSx_MASK(index); + + if (!enable) { + val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP); + writel(val, ioaddr + XGMAC_PPS_CONTROL); + return 0; + } + + val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START); + val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START); + + /* XGMAC Core has 4 PPS outputs at most. + * + * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for + * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default, + * and can not be switched to Fixed mode, since PPSEN{1,2,3} are + * read-only reserved to 0. + * But we always set PPSEN{1,2,3} do not make things worse ;-) + * + * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must + * be set, or the PPS outputs stay in Fixed PPS mode by default. 
+ */ + val |= XGMAC_PPSENx(index); + + writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index)); + + if (!(systime_flags & PTP_TCR_TSCTRLSSR)) + cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; + writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index)); + + period = cfg->period.tv_sec * 1000000000; + period += cfg->period.tv_nsec; + + do_div(period, sub_second_inc); + + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index)); + + period >>= 1; + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index)); + + /* Finally, activate it */ + writel(val, ioaddr + XGMAC_PPS_CONTROL); + return 0; +} + +static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val) +{ + u32 value = readl(ioaddr + XGMAC_TX_CONFIG); + + value &= ~XGMAC_CONFIG_SARC; + value |= val << XGMAC_CONFIG_SARC_SHIFT; + + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_VLAN_INCL); + value |= XGMAC_VLAN_VLTI; + value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */ + value &= ~XGMAC_VLAN_VLC; + value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC; + writel(value, ioaddr + XGMAC_VLAN_INCL); +} + +static int dwxgmac2_filter_wait(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value, + !(value & XGMAC_XB), 100, 10000)) + return -EBUSY; + return 0; +} + +static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no, + u8 reg, u32 *data) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + ret = dwxgmac2_filter_wait(hw); + if (ret) + return ret; + + value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT; + value |= XGMAC_TT | XGMAC_XB; + writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL); + + ret = dwxgmac2_filter_wait(hw); + if (ret) + return ret; + + *data = readl(ioaddr + XGMAC_L3L4_DATA); + return 0; +} + +static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no, + u8 reg, u32 data) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + ret = dwxgmac2_filter_wait(hw); + if (ret) + return ret; + + writel(data, ioaddr + XGMAC_L3L4_DATA); + + value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT; + value |= XGMAC_XB; + writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL); + + return dwxgmac2_filter_wait(hw); +} + +static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool ipv6, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + value = readl(ioaddr + XGMAC_PACKET_FILTER); + value |= XGMAC_FILTER_IPFE; + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value); + if (ret) + return ret; + + /* For IPv6 not both SA/DA filters can be active */ + if (ipv6) { + value |= XGMAC_L3PEN0; + value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0); + value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0); + if (sa) { + value |= XGMAC_L3SAM0; + if (inv) + value |= XGMAC_L3SAIM0; + } else { + value |= XGMAC_L3DAM0; + if (inv) + value |= XGMAC_L3DAIM0; + } + } else { + value &= ~XGMAC_L3PEN0; + if (sa) { + value |= XGMAC_L3SAM0; + if (inv) + value |= XGMAC_L3SAIM0; + } else { + value |= XGMAC_L3DAM0; + if (inv) + value |= XGMAC_L3DAIM0; + } + } + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value); + if (ret) + 
return ret; + + if (sa) { + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match); + if (ret) + return ret; + } else { + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match); + if (ret) + return ret; + } + + if (!en) + return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0); + + return 0; +} + +static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool udp, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + value = readl(ioaddr + XGMAC_PACKET_FILTER); + value |= XGMAC_FILTER_IPFE; + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value); + if (ret) + return ret; + + if (udp) { + value |= XGMAC_L4PEN0; + } else { + value &= ~XGMAC_L4PEN0; + } + + value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0); + value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0); + if (sa) { + value |= XGMAC_L4SPM0; + if (inv) + value |= XGMAC_L4SPIM0; + } else { + value |= XGMAC_L4DPM0; + if (inv) + value |= XGMAC_L4DPIM0; + } + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value); + if (ret) + return ret; + + if (sa) { + value = match & XGMAC_L4SP0; + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); + if (ret) + return ret; + } else { + value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0; + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); + if (ret) + return ret; + } + + if (!en) + return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0); + + return 0; +} + +static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, + u32 addr) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(addr, ioaddr + XGMAC_ARP_ADDR); + + value = readl(ioaddr + XGMAC_RX_CONFIG); + if (en) + value |= XGMAC_CONFIG_ARPEN; + else + value &= ~XGMAC_CONFIG_ARPEN; + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl) +{ + u32 ctrl; + + writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA); + + ctrl = (reg << XGMAC_ADDR_SHIFT); + ctrl |= gcl ? 
0 : XGMAC_GCRR; + + writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL); + + ctrl |= XGMAC_SRWO; + writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL); + + return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL, + ctrl, !(ctrl & XGMAC_SRWO), 100, 5000); +} + +static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate) +{ + int i, ret = 0x0; + u32 ctrl; + + ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false); + if (ret) + return ret; + + for (i = 0; i < cfg->gcl_size; i++) { + ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true); + if (ret) + return ret; + } + + ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL); + ctrl &= ~XGMAC_PTOV; + ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT; + if (cfg->enable) + ctrl |= XGMAC_EEST | XGMAC_SSWL; + else + ctrl &= ~XGMAC_EEST; + + writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL); + return 0; +} + +static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, + u32 num_rxq, bool enable) +{ + u32 value; + + if (!enable) { + value = readl(ioaddr + XGMAC_FPE_CTRL_STS); + + value &= ~XGMAC_EFPE; + + writel(value, ioaddr + XGMAC_FPE_CTRL_STS); + return; + } + + value = readl(ioaddr + XGMAC_RXQ_CTRL1); + value &= ~XGMAC_RQ; + value |= (num_rxq - 1) << XGMAC_RQ_SHIFT; + writel(value, ioaddr + XGMAC_RXQ_CTRL1); + + value = readl(ioaddr + XGMAC_FPE_CTRL_STS); + value |= XGMAC_EFPE; + writel(value, ioaddr + XGMAC_FPE_CTRL_STS); +} + +const struct stmmac_ops dwxgmac210_ops = { + .core_init = dwxgmac2_core_init, + .set_mac = dwxgmac2_set_mac, + .rx_ipc = dwxgmac2_rx_ipc, + .rx_queue_enable = dwxgmac2_rx_queue_enable, + .rx_queue_prio = dwxgmac2_rx_queue_prio, + .tx_queue_prio = dwxgmac2_tx_queue_prio, + .rx_queue_routing = NULL, + .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, + .config_cbs = dwxgmac2_config_cbs, + .dump_regs = dwxgmac2_dump_regs, + .host_irq_status = dwxgmac2_host_irq_status, + .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, + .flow_ctrl = dwxgmac2_flow_ctrl, + .pmt = dwxgmac2_pmt, + .set_umac_addr = dwxgmac2_set_umac_addr, + .get_umac_addr = dwxgmac2_get_umac_addr, + .set_eee_mode = dwxgmac2_set_eee_mode, + .reset_eee_mode = dwxgmac2_reset_eee_mode, + .set_eee_timer = dwxgmac2_set_eee_timer, + .set_eee_pls = dwxgmac2_set_eee_pls, + .pcs_ctrl_ane = NULL, + .pcs_rane = NULL, + .pcs_get_adv_lp = NULL, + .debug = NULL, + .set_filter = dwxgmac2_set_filter, + .safety_feat_config = dwxgmac3_safety_feat_config, + .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status, + .safety_feat_dump = dwxgmac3_safety_feat_dump, + .set_mac_loopback = dwxgmac2_set_mac_loopback, + .rss_configure = dwxgmac2_rss_configure, + .update_vlan_hash = dwxgmac2_update_vlan_hash, + .rxp_config = dwxgmac3_rxp_config, + .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp, + .flex_pps_config = dwxgmac2_flex_pps_config, + .sarc_configure = dwxgmac2_sarc_configure, + .enable_vlan = dwxgmac2_enable_vlan, + .config_l3_filter = 
dwxgmac2_config_l3_filter, + .config_l4_filter = dwxgmac2_config_l4_filter, + .set_arp_offload = dwxgmac2_set_arp_offload, + .est_configure = dwxgmac3_est_configure, + .fpe_configure = dwxgmac3_fpe_configure, +}; + +static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue); + if (mode == MTL_QUEUE_AVB) + value |= 0x1 << XGMAC_RXQEN_SHIFT(queue); + else if (mode == MTL_QUEUE_DCB) + value |= 0x2 << XGMAC_RXQEN_SHIFT(queue); + writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0); +} + +const struct stmmac_ops dwxlgmac2_ops = { + .core_init = dwxgmac2_core_init, + .set_mac = dwxgmac2_set_mac, + .rx_ipc = dwxgmac2_rx_ipc, + .rx_queue_enable = dwxlgmac2_rx_queue_enable, + .rx_queue_prio = dwxgmac2_rx_queue_prio, + .tx_queue_prio = dwxgmac2_tx_queue_prio, + .rx_queue_routing = NULL, + .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, + .config_cbs = dwxgmac2_config_cbs, + .dump_regs = dwxgmac2_dump_regs, + .host_irq_status = dwxgmac2_host_irq_status, + .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, + .flow_ctrl = dwxgmac2_flow_ctrl, + .pmt = dwxgmac2_pmt, + .set_umac_addr = dwxgmac2_set_umac_addr, + .get_umac_addr = dwxgmac2_get_umac_addr, + .set_eee_mode = dwxgmac2_set_eee_mode, + .reset_eee_mode = dwxgmac2_reset_eee_mode, + .set_eee_timer = dwxgmac2_set_eee_timer, + .set_eee_pls = dwxgmac2_set_eee_pls, + .pcs_ctrl_ane = NULL, + .pcs_rane = NULL, + .pcs_get_adv_lp = NULL, + .debug = NULL, + .set_filter = dwxgmac2_set_filter, + .safety_feat_config = dwxgmac3_safety_feat_config, + .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status, + .safety_feat_dump = dwxgmac3_safety_feat_dump, + .set_mac_loopback = dwxgmac2_set_mac_loopback, + .rss_configure = dwxgmac2_rss_configure, + .update_vlan_hash = dwxgmac2_update_vlan_hash, + .rxp_config = dwxgmac3_rxp_config, + .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp, + .flex_pps_config = dwxgmac2_flex_pps_config, + .sarc_configure = dwxgmac2_sarc_configure, + .enable_vlan = dwxgmac2_enable_vlan, + .config_l3_filter = dwxgmac2_config_l3_filter, + .config_l4_filter = dwxgmac2_config_l4_filter, + .set_arp_offload = dwxgmac2_set_arp_offload, + .est_configure = dwxgmac3_est_configure, + .fpe_configure = dwxgmac3_fpe_configure, +}; + +int dwxgmac2_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tXGMAC2\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = 0; + mac->link.speed10 = XGMAC_CONFIG_SS_10_MII; + mac->link.speed100 = XGMAC_CONFIG_SS_100_MII; + mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII; + mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII; + mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500; + mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000; + mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000; + mac->link.speed_mask = XGMAC_CONFIG_SS_MASK; + + mac->mii.addr = XGMAC_MDIO_ADDR; + mac->mii.data = XGMAC_MDIO_DATA; + mac->mii.addr_shift = 16; + 
mac->mii.addr_mask = GENMASK(20, 16); + mac->mii.reg_shift = 0; + mac->mii.reg_mask = GENMASK(15, 0); + mac->mii.clk_csr_shift = 19; + mac->mii.clk_csr_mask = GENMASK(21, 19); + + return 0; +} + +int dwxlgmac2_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tXLGMAC\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = 0; + mac->link.speed1000 = XLGMAC_CONFIG_SS_1000; + mac->link.speed2500 = XLGMAC_CONFIG_SS_2500; + mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G; + mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G; + mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G; + mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G; + mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G; + mac->link.speed_mask = XLGMAC_CONFIG_SS; + + mac->mii.addr = XGMAC_MDIO_ADDR; + mac->mii.data = XGMAC_MDIO_DATA; + mac->mii.addr_shift = 16; + mac->mii.addr_mask = GENMASK(20, 16); + mac->mii.reg_shift = 0; + mac->mii.reg_mask = GENMASK(15, 0); + mac->mii.clk_csr_shift = 19; + mac->mii.clk_csr_mask = GENMASK(21, 19); + + return 0; +} diff --git a/devices/stmmac/dwxgmac2_core-6.1-orig.c b/devices/stmmac/dwxgmac2_core-6.1-orig.c new file mode 100644 index 00000000..ec1616ff --- /dev/null +++ b/devices/stmmac/dwxgmac2_core-6.1-orig.c @@ -0,0 +1,1705 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. + */ + +#include +#include +#include +#include "stmmac.h" +#include "stmmac_ptp.h" +#include "dwxlgmac2.h" +#include "dwxgmac2.h" + +static void dwxgmac2_core_init(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = hw->pcsr; + u32 tx, rx; + + tx = readl(ioaddr + XGMAC_TX_CONFIG); + rx = readl(ioaddr + XGMAC_RX_CONFIG); + + tx |= XGMAC_CORE_INIT_TX; + rx |= XGMAC_CORE_INIT_RX; + + if (hw->ps) { + tx |= XGMAC_CONFIG_TE; + tx &= ~hw->link.speed_mask; + + switch (hw->ps) { + case SPEED_10000: + tx |= hw->link.xgmii.speed10000; + break; + case SPEED_2500: + tx |= hw->link.speed2500; + break; + case SPEED_1000: + default: + tx |= hw->link.speed1000; + break; + } + } + + writel(tx, ioaddr + XGMAC_TX_CONFIG); + writel(rx, ioaddr + XGMAC_RX_CONFIG); + writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); +} + +static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 tx = readl(ioaddr + XGMAC_TX_CONFIG); + u32 rx = readl(ioaddr + XGMAC_RX_CONFIG); + + if (enable) { + tx |= XGMAC_CONFIG_TE; + rx |= XGMAC_CONFIG_RE; + } else { + tx &= ~XGMAC_CONFIG_TE; + rx &= ~XGMAC_CONFIG_RE; + } + + writel(tx, ioaddr + XGMAC_TX_CONFIG); + writel(rx, ioaddr + XGMAC_RX_CONFIG); +} + +static int dwxgmac2_rx_ipc(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_RX_CONFIG); + if (hw->rx_csum) + value |= XGMAC_CONFIG_IPC; + else + value &= ~XGMAC_CONFIG_IPC; + writel(value, ioaddr + XGMAC_RX_CONFIG); + + return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC); +} + +static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue); + if (mode == MTL_QUEUE_AVB) 
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue); + else if (mode == MTL_QUEUE_DCB) + value |= 0x2 << XGMAC_RXQEN_SHIFT(queue); + writel(value, ioaddr + XGMAC_RXQ_CTRL0); +} + +static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, reg; + + reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + reg); + value &= ~XGMAC_PSRQ(queue); + value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue); + + writel(value, ioaddr + reg); +} + +static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, reg; + + reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + reg); + value &= ~XGMAC_PSTC(queue); + value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue); + + writel(value, ioaddr + reg); +} + +static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw, + u32 rx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_MTL_OPMODE); + value &= ~XGMAC_RAA; + + switch (rx_alg) { + case MTL_RX_ALGORITHM_SP: + break; + case MTL_RX_ALGORITHM_WSP: + value |= XGMAC_RAA; + break; + default: + break; + } + + writel(value, ioaddr + XGMAC_MTL_OPMODE); +} + +static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw, + u32 tx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + bool ets = true; + u32 value; + int i; + + value = readl(ioaddr + XGMAC_MTL_OPMODE); + value &= ~XGMAC_ETSALG; + + switch (tx_alg) { + case MTL_TX_ALGORITHM_WRR: + value |= XGMAC_WRR; + break; + case MTL_TX_ALGORITHM_WFQ: + value |= XGMAC_WFQ; + break; + case MTL_TX_ALGORITHM_DWRR: + value |= XGMAC_DWRR; + break; + default: + ets = false; + break; + } + + writel(value, ioaddr + XGMAC_MTL_OPMODE); + + /* Set ETS if desired */ + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i)); + value &= ~XGMAC_TSA; + if (ets) + value |= XGMAC_ETS; + writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i)); + } +} + +static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw, + u32 weight, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); +} + +static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, + u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, reg; + + reg = (queue < 4) ? 
XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1; + if (queue >= 4) + queue -= 4; + + value = readl(ioaddr + reg); + value &= ~XGMAC_QxMDMACH(queue); + value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue); + + writel(value, ioaddr + reg); +} + +static void dwxgmac2_config_cbs(struct mac_device_info *hw, + u32 send_slope, u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue)); + writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); + writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue)); + writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); + + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); + value &= ~XGMAC_TSA; + value |= XGMAC_CC | XGMAC_CBS; + writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); +} + +static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + for (i = 0; i < XGMAC_MAC_REGSIZE; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static int dwxgmac2_host_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 stat, en; + int ret = 0; + + en = readl(ioaddr + XGMAC_INT_EN); + stat = readl(ioaddr + XGMAC_INT_STATUS); + + stat &= en; + + if (stat & XGMAC_PMTIS) { + x->irq_receive_pmt_irq_n++; + readl(ioaddr + XGMAC_PMT); + } + + if (stat & XGMAC_LPIIS) { + u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL); + + if (lpi & XGMAC_TLPIEN) { + ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; + x->irq_tx_path_in_lpi_mode_n++; + } + if (lpi & XGMAC_TLPIEX) { + ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; + x->irq_tx_path_exit_lpi_mode_n++; + } + if (lpi & XGMAC_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (lpi & XGMAC_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + return ret; +} + +static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + int ret = 0; + u32 status; + + status = readl(ioaddr + XGMAC_MTL_INT_STATUS); + if (status & BIT(chan)) { + u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan)); + + if (chan_status & XGMAC_RXOVFIS) + ret |= CORE_IRQ_MTL_RX_OVERFLOW; + + writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan)); + } + + return ret; +} + +static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) +{ + void __iomem *ioaddr = hw->pcsr; + u32 i; + + if (fc & FLOW_RX) + writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL); + if (fc & FLOW_TX) { + for (i = 0; i < tx_cnt; i++) { + u32 value = XGMAC_TFE; + + if (duplex) + value |= pause_time << XGMAC_PT_SHIFT; + + writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i)); + } + } +} + +static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + u32 val = 0x0; + + if (mode & WAKE_MAGIC) + val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN; + if (mode & WAKE_UCAST) + val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN; + if (val) { + u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG); + cfg |= XGMAC_CONFIG_RE; + writel(cfg, ioaddr + XGMAC_RX_CONFIG); + } + + writel(val, ioaddr + XGMAC_PMT); +} + +static void dwxgmac2_set_umac_addr(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = (addr[5] << 8) | addr[4]; + writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n)); + + value = (addr[3] << 24) | 
(addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n)); +} + +static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + u32 hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n)); + lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n)); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} + +static void dwxgmac2_set_eee_mode(struct mac_device_info *hw, + bool en_tx_lpi_clockgating) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + + value |= XGMAC_LPITXEN | XGMAC_LPITXA; + if (en_tx_lpi_clockgating) + value |= XGMAC_TXCGE; + + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE); + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_LPI_CTRL); + if (link) + value |= XGMAC_PLS; + else + value &= ~XGMAC_PLS; + writel(value, ioaddr + XGMAC_LPI_CTRL); +} + +static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = (tw & 0xffff) | ((ls & 0x3ff) << 16); + writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL); +} + +static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, + int mcbitslog2) +{ + int numhashregs, regs; + + switch (mcbitslog2) { + case 6: + numhashregs = 2; + break; + case 7: + numhashregs = 4; + break; + case 8: + numhashregs = 8; + break; + default: + return; + } + + for (regs = 0; regs < numhashregs; regs++) + writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs)); +} + +static void dwxgmac2_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + int mcbitslog2 = hw->mcast_bits_log2; + u32 mc_filter[8]; + int i; + + value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM); + value |= XGMAC_FILTER_HPF; + + memset(mc_filter, 0, sizeof(mc_filter)); + + if (dev->flags & IFF_PROMISC) { + value |= XGMAC_FILTER_PR; + value |= XGMAC_FILTER_PCF; + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > hw->multicast_filter_bins)) { + value |= XGMAC_FILTER_PM; + + for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++) + writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i)); + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { + struct netdev_hw_addr *ha; + + value |= XGMAC_FILTER_HMC; + + netdev_for_each_mc_addr(ha, dev) { + u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> + (32 - mcbitslog2)); + mc_filter[nr >> 5] |= (1 << (nr & 0x1F)); + } + } + + dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2); + + /* Handle multiple unicast addresses */ + if (netdev_uc_count(dev) > hw->unicast_filter_entries) { + value |= XGMAC_FILTER_PR; + } else { + struct netdev_hw_addr *ha; + int reg = 1; + + netdev_for_each_uc_addr(ha, dev) { + dwxgmac2_set_umac_addr(hw, ha->addr, reg); + reg++; + 
} + + for ( ; reg < XGMAC_ADDR_MAX; reg++) { + writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg)); + writel(0, ioaddr + XGMAC_ADDRx_LOW(reg)); + } + } + + writel(value, ioaddr + XGMAC_PACKET_FILTER); +} + +static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + XGMAC_RX_CONFIG); + + if (enable) + value |= XGMAC_CONFIG_LM; + else + value &= ~XGMAC_CONFIG_LM; + + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx, + u32 val) +{ + u32 ctrl = 0; + + writel(val, ioaddr + XGMAC_RSS_DATA); + ctrl |= idx << XGMAC_RSSIA_SHIFT; + ctrl |= is_key ? XGMAC_ADDRT : 0x0; + ctrl |= XGMAC_OB; + writel(ctrl, ioaddr + XGMAC_RSS_ADDR); + + return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl, + !(ctrl & XGMAC_OB), 100, 10000); +} + +static int dwxgmac2_rss_configure(struct mac_device_info *hw, + struct stmmac_rss *cfg, u32 num_rxq) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value, *key; + int i, ret; + + value = readl(ioaddr + XGMAC_RSS_CTRL); + if (!cfg || !cfg->enable) { + value &= ~XGMAC_RSSE; + writel(value, ioaddr + XGMAC_RSS_CTRL); + return 0; + } + + key = (u32 *)cfg->key; + for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) { + ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]); + if (ret) + return ret; + } + + for (i = 0; i < ARRAY_SIZE(cfg->table); i++) { + ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]); + if (ret) + return ret; + } + + for (i = 0; i < num_rxq; i++) + dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH); + + value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE; + writel(value, ioaddr + XGMAC_RSS_CTRL); + return 0; +} + +static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, + __le16 perfect_match, bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE); + + if (hash) { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value |= XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; + if (is_double) { + value |= XGMAC_VLAN_EDVLP; + value |= XGMAC_VLAN_ESVL; + value |= XGMAC_VLAN_DOVLTC; + } else { + value &= ~XGMAC_VLAN_EDVLP; + value &= ~XGMAC_VLAN_ESVL; + value &= ~XGMAC_VLAN_DOVLTC; + } + + value &= ~XGMAC_VLAN_VID; + writel(value, ioaddr + XGMAC_VLAN_TAG); + } else if (perfect_match) { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value |= XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value &= ~XGMAC_VLAN_VTHM; + value |= XGMAC_VLAN_ETV; + if (is_double) { + value |= XGMAC_VLAN_EDVLP; + value |= XGMAC_VLAN_ESVL; + value |= XGMAC_VLAN_DOVLTC; + } else { + value &= ~XGMAC_VLAN_EDVLP; + value &= ~XGMAC_VLAN_ESVL; + value &= ~XGMAC_VLAN_DOVLTC; + } + + value &= ~XGMAC_VLAN_VID; + writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG); + } else { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value &= ~XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + XGMAC_VLAN_TAG); + + value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV); + value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL); + value &= ~XGMAC_VLAN_DOVLTC; + value &= ~XGMAC_VLAN_VID; + + writel(value, ioaddr + XGMAC_VLAN_TAG); + } +} + +struct dwxgmac3_error_desc { + bool valid; + const char *desc; + const char *detailed_desc; +}; + +#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, 
field) + +static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr, + const char *module_name, + const struct dwxgmac3_error_desc *desc, + unsigned long field_offset, + struct stmmac_safety_stats *stats) +{ + unsigned long loc, mask; + u8 *bptr = (u8 *)stats; + unsigned long *ptr; + + ptr = (unsigned long *)(bptr + field_offset); + + mask = value; + for_each_set_bit(loc, &mask, 32) { + netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ? + "correctable" : "uncorrectable", module_name, + desc[loc].desc, desc[loc].detailed_desc); + + /* Update counters */ + ptr[loc]++; + } +} + +static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= { + { true, "ATPES", "Application Transmit Interface Parity Check Error" }, + { true, "DPES", "Descriptor Cache Data Path Parity Check Error" }, + { true, "TPES", "TSO Data Path Parity Check Error" }, + { true, "TSOPES", "TSO Header Data Path Parity Check Error" }, + { true, "MTPES", "MTL Data Path Parity Check Error" }, + { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" }, + { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" }, + { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" }, + { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" }, + { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" }, + { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" }, + { true, "CWPES", "CSR Write Data Path Parity Check Error" }, + { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" }, + { true, "TTES", "TX FSM Timeout Error" }, + { true, "RTES", "RX FSM Timeout Error" }, + { true, "CTES", "CSR FSM Timeout Error" }, + { true, "ATES", "APP FSM Timeout Error" }, + { true, "PTES", "PTP FSM Timeout Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { true, "MSTTES", "Master Read/Write Timeout Error" }, + { true, "SLVTES", "Slave Read/Write Timeout Error" }, + { true, "ATITES", "Application Timeout on ATI Interface Error" }, + { true, "ARITES", "Application Timeout on ARI Interface Error" }, + { true, "FSMPES", "FSM State Parity Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { true, "CPI", "Control Register Parity Check Error" }, +}; + +static void dwxgmac3_handle_mac_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS); + writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "MAC", + dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats); +} + +static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= { + { true, "TXCES", "MTL TX Memory Error" }, + { true, "TXAMS", "MTL TX Memory Address Mismatch Error" }, + { true, "TXUES", "MTL TX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "RXCES", "MTL RX Memory Error" }, + { true, "RXAMS", "MTL RX Memory Address Mismatch Error" }, + { true, "RXUES", "MTL RX Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { true, "ECES", "MTL EST Memory Error" }, + { true, "EAMS", "MTL EST Memory Address Mismatch Error" }, + { true, "EUES", "MTL EST Memory Error" }, + { false, "UNKNOWN", "Unknown Error" 
}, /* 11 */ + { true, "RPCES", "MTL RX Parser Memory Error" }, + { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" }, + { true, "RPUES", "MTL RX Parser Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +static void dwxgmac3_handle_mtl_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS); + writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "MTL", + dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats); +} + +static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { + { true, "TCES", "DMA TSO Memory Error" }, + { true, "TAMS", "DMA TSO Memory Address Mismatch Error" }, + { true, "TUES", "DMA TSO Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 3 */ + { true, "DCES", "DMA DCACHE Memory Error" }, + { true, "DAMS", "DMA DCACHE Address Mismatch Error" }, + { true, "DUES", "DMA DCACHE Memory Error" }, + { false, "UNKNOWN", "Unknown Error" }, /* 7 */ + { false, "UNKNOWN", "Unknown Error" }, /* 8 */ + { false, "UNKNOWN", "Unknown Error" }, /* 9 */ + { false, "UNKNOWN", "Unknown Error" }, /* 10 */ + { false, "UNKNOWN", "Unknown Error" }, /* 11 */ + { false, "UNKNOWN", "Unknown Error" }, /* 12 */ + { false, "UNKNOWN", "Unknown Error" }, /* 13 */ + { false, "UNKNOWN", "Unknown Error" }, /* 14 */ + { false, "UNKNOWN", "Unknown Error" }, /* 15 */ + { false, "UNKNOWN", "Unknown Error" }, /* 16 */ + { false, "UNKNOWN", "Unknown Error" }, /* 17 */ + { false, "UNKNOWN", "Unknown Error" }, /* 18 */ + { false, "UNKNOWN", "Unknown Error" }, /* 19 */ + { false, "UNKNOWN", "Unknown Error" }, /* 20 */ + { false, "UNKNOWN", "Unknown Error" }, /* 21 */ + { false, "UNKNOWN", "Unknown Error" }, /* 22 */ + { false, "UNKNOWN", "Unknown Error" }, /* 23 */ + { false, "UNKNOWN", "Unknown Error" }, /* 24 */ + { false, "UNKNOWN", "Unknown Error" }, /* 25 */ + { false, "UNKNOWN", "Unknown Error" }, /* 26 */ + { false, "UNKNOWN", "Unknown Error" }, /* 27 */ + { false, "UNKNOWN", "Unknown Error" }, /* 28 */ + { false, "UNKNOWN", "Unknown Error" }, /* 29 */ + { false, "UNKNOWN", "Unknown Error" }, /* 30 */ + { false, "UNKNOWN", "Unknown Error" }, /* 31 */ +}; + +#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" +#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" + +static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { + { true, "TDPES0", DPP_TX_ERR }, + { true, "TDPES1", DPP_TX_ERR }, + { true, "TDPES2", DPP_TX_ERR }, + { true, "TDPES3", DPP_TX_ERR }, + { true, "TDPES4", DPP_TX_ERR }, + { true, "TDPES5", DPP_TX_ERR }, + { true, "TDPES6", DPP_TX_ERR }, + { true, "TDPES7", DPP_TX_ERR 
}, + { true, "TDPES8", DPP_TX_ERR }, + { true, "TDPES9", DPP_TX_ERR }, + { true, "TDPES10", DPP_TX_ERR }, + { true, "TDPES11", DPP_TX_ERR }, + { true, "TDPES12", DPP_TX_ERR }, + { true, "TDPES13", DPP_TX_ERR }, + { true, "TDPES14", DPP_TX_ERR }, + { true, "TDPES15", DPP_TX_ERR }, + { true, "RDPES0", DPP_RX_ERR }, + { true, "RDPES1", DPP_RX_ERR }, + { true, "RDPES2", DPP_RX_ERR }, + { true, "RDPES3", DPP_RX_ERR }, + { true, "RDPES4", DPP_RX_ERR }, + { true, "RDPES5", DPP_RX_ERR }, + { true, "RDPES6", DPP_RX_ERR }, + { true, "RDPES7", DPP_RX_ERR }, + { true, "RDPES8", DPP_RX_ERR }, + { true, "RDPES9", DPP_RX_ERR }, + { true, "RDPES10", DPP_RX_ERR }, + { true, "RDPES11", DPP_RX_ERR }, + { true, "RDPES12", DPP_RX_ERR }, + { true, "RDPES13", DPP_RX_ERR }, + { true, "RDPES14", DPP_RX_ERR }, + { true, "RDPES15", DPP_RX_ERR }, +}; + +static void dwxgmac3_handle_dma_err(struct net_device *ndev, + void __iomem *ioaddr, bool correctable, + struct stmmac_safety_stats *stats) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS); + + dwxgmac3_log_error(ndev, value, correctable, "DMA", + dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats); + + value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS); + + dwxgmac3_log_error(ndev, value, false, "DMA_DPP", + dwxgmac3_dma_dpp_errors, + STAT_OFF(dma_dpp_errors), stats); +} + +static int +dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_cfg) +{ + u32 value; + + if (!asp) + return -EINVAL; + + /* 1. Enable Safety Features */ + writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL); + + /* 2. Enable MTL Safety Interrupts */ + value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE); + value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */ + value |= XGMAC_ECEIE; /* EST Memory Correctable Error */ + value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */ + value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */ + writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE); + + /* 3. Enable DMA Safety Interrupts */ + value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE); + value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */ + value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */ + writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE); + + /* Only ECC Protection for External Memory feature is selected */ + if (asp <= 0x1) + return 0; + + /* 4. Enable Parity and Timeout for FSM */ + value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL); + value |= XGMAC_PRTYEN; /* FSM Parity Feature */ + value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */ + writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL); + + /* 5. 
Enable Data Path Parity Protection */ + value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL); + /* already enabled by default, explicit enable it again */ + value &= ~XGMAC_DPP_DISABLE; + writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL); + + return 0; +} + +static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev, + void __iomem *ioaddr, + unsigned int asp, + struct stmmac_safety_stats *stats) +{ + bool err, corr; + u32 mtl, dma; + int ret = 0; + + if (!asp) + return -EINVAL; + + mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS); + dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS); + + err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS); + corr = false; + if (err) { + dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) || + (dma & (XGMAC_MSUIS | XGMAC_MSCIS)); + corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS); + if (err) { + dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in + * DMA_Safety_Interrupt_Status, so we handle DMA Data Path + * Parity Errors here + */ + err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS); + corr = dma & XGMAC_DECIS; + if (err) { + dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats); + ret |= !corr; + } + + return ret; +} + +static const struct dwxgmac3_error { + const struct dwxgmac3_error_desc *desc; +} dwxgmac3_all_errors[] = { + { dwxgmac3_mac_errors }, + { dwxgmac3_mtl_errors }, + { dwxgmac3_dma_errors }, + { dwxgmac3_dma_dpp_errors }, +}; + +static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, + const char **desc) +{ + int module = index / 32, offset = index % 32; + unsigned long *ptr = (unsigned long *)stats; + + if (module >= ARRAY_SIZE(dwxgmac3_all_errors)) + return -EINVAL; + if (!dwxgmac3_all_errors[module].desc[offset].valid) + return -EINVAL; + if (count) + *count = *(ptr + index); + if (desc) + *desc = dwxgmac3_all_errors[module].desc[offset].desc; + return 0; +} + +static int dwxgmac3_rxp_disable(void __iomem *ioaddr) +{ + u32 val = readl(ioaddr + XGMAC_MTL_OPMODE); + + val &= ~XGMAC_FRPE; + writel(val, ioaddr + XGMAC_MTL_OPMODE); + + return 0; +} + +static void dwxgmac3_rxp_enable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + XGMAC_MTL_OPMODE); + val |= XGMAC_FRPE; + writel(val, ioaddr + XGMAC_MTL_OPMODE); +} + +static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr, + struct stmmac_tc_entry *entry, + int pos) +{ + int ret, i; + + for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) { + int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i; + u32 val; + + /* Wait for ready */ + ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST, + val, !(val & XGMAC_STARTBUSY), 1, 10000); + if (ret) + return ret; + + /* Write data */ + val = *((u32 *)&entry->val + i); + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA); + + /* Write pos */ + val = real_pos & XGMAC_ADDR; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Write OP */ + val |= XGMAC_WRRDN; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Start Write */ + val |= XGMAC_STARTBUSY; + writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST); + + /* Wait for done */ + ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST, + val, !(val & XGMAC_STARTBUSY), 1, 10000); + if (ret) + return ret; + } + + return 0; +} + +static struct stmmac_tc_entry * +dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries, + unsigned int count, u32 curr_prio) +{ + 
struct stmmac_tc_entry *entry; + u32 min_prio = ~0x0; + int i, min_prio_idx; + bool found = false; + + for (i = count - 1; i >= 0; i--) { + entry = &entries[i]; + + /* Do not update unused entries */ + if (!entry->in_use) + continue; + /* Do not update already updated entries (i.e. fragments) */ + if (entry->in_hw) + continue; + /* Let last entry be updated last */ + if (entry->is_last) + continue; + /* Do not return fragments */ + if (entry->is_frag) + continue; + /* Check if we already checked this prio */ + if (entry->prio < curr_prio) + continue; + /* Check if this is the minimum prio */ + if (entry->prio < min_prio) { + min_prio = entry->prio; + min_prio_idx = i; + found = true; + } + } + + if (found) + return &entries[min_prio_idx]; + return NULL; +} + +static int dwxgmac3_rxp_config(void __iomem *ioaddr, + struct stmmac_tc_entry *entries, + unsigned int count) +{ + struct stmmac_tc_entry *entry, *frag; + int i, ret, nve = 0; + u32 curr_prio = 0; + u32 old_val, val; + + /* Force disable RX */ + old_val = readl(ioaddr + XGMAC_RX_CONFIG); + val = old_val & ~XGMAC_CONFIG_RE; + writel(val, ioaddr + XGMAC_RX_CONFIG); + + /* Disable RX Parser */ + ret = dwxgmac3_rxp_disable(ioaddr); + if (ret) + goto re_enable; + + /* Set all entries as NOT in HW */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + entry->in_hw = false; + } + + /* Update entries by reverse order */ + while (1) { + entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio); + if (!entry) + break; + + curr_prio = entry->prio; + frag = entry->frag_ptr; + + /* Set special fragment requirements */ + if (frag) { + entry->val.af = 0; + entry->val.rf = 0; + entry->val.nc = 1; + entry->val.ok_index = nve + 2; + } + + ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + entry->in_hw = true; + + if (frag && !frag->in_hw) { + ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve); + if (ret) + goto re_enable; + frag->table_pos = nve++; + frag->in_hw = true; + } + } + + if (!nve) + goto re_enable; + + /* Update all pass entry */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + if (!entry->is_last) + continue; + + ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + } + + /* Assume n. of parsable entries == n. 
of valid entries */ + val = (nve << 16) & XGMAC_NPE; + val |= nve & XGMAC_NVE; + writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS); + + /* Enable RX Parser */ + dwxgmac3_rxp_enable(ioaddr); + +re_enable: + /* Re-enable RX */ + writel(old_val, ioaddr + XGMAC_RX_CONFIG); + return ret; +} + +static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS, + value, value & XGMAC_TXTSC, 100, 10000)) + return -EBUSY; + + *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO; + *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL; + return 0; +} + +static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags) +{ + u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index)); + u32 val = readl(ioaddr + XGMAC_PPS_CONTROL); + u64 period; + + if (!cfg->available) + return -EINVAL; + if (tnsec & XGMAC_TRGTBUSY0) + return -EBUSY; + if (!sub_second_inc || !systime_flags) + return -EINVAL; + + val &= ~XGMAC_PPSx_MASK(index); + + if (!enable) { + val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP); + writel(val, ioaddr + XGMAC_PPS_CONTROL); + return 0; + } + + val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START); + val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START); + + /* XGMAC Core has 4 PPS outputs at most. + * + * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for + * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default, + * and can not be switched to Fixed mode, since PPSEN{1,2,3} are + * read-only reserved to 0. + * But we always set PPSEN{1,2,3} do not make things worse ;-) + * + * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must + * be set, or the PPS outputs stay in Fixed PPS mode by default. 
+ */ + val |= XGMAC_PPSENx(index); + + writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index)); + + if (!(systime_flags & PTP_TCR_TSCTRLSSR)) + cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; + writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index)); + + period = cfg->period.tv_sec * 1000000000; + period += cfg->period.tv_nsec; + + do_div(period, sub_second_inc); + + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index)); + + period >>= 1; + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index)); + + /* Finally, activate it */ + writel(val, ioaddr + XGMAC_PPS_CONTROL); + return 0; +} + +static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val) +{ + u32 value = readl(ioaddr + XGMAC_TX_CONFIG); + + value &= ~XGMAC_CONFIG_SARC; + value |= val << XGMAC_CONFIG_SARC_SHIFT; + + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XGMAC_VLAN_INCL); + value |= XGMAC_VLAN_VLTI; + value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */ + value &= ~XGMAC_VLAN_VLC; + value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC; + writel(value, ioaddr + XGMAC_VLAN_INCL); +} + +static int dwxgmac2_filter_wait(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value, + !(value & XGMAC_XB), 100, 10000)) + return -EBUSY; + return 0; +} + +static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no, + u8 reg, u32 *data) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + ret = dwxgmac2_filter_wait(hw); + if (ret) + return ret; + + value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT; + value |= XGMAC_TT | XGMAC_XB; + writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL); + + ret = dwxgmac2_filter_wait(hw); + if (ret) + return ret; + + *data = readl(ioaddr + XGMAC_L3L4_DATA); + return 0; +} + +static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no, + u8 reg, u32 data) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + ret = dwxgmac2_filter_wait(hw); + if (ret) + return ret; + + writel(data, ioaddr + XGMAC_L3L4_DATA); + + value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT; + value |= XGMAC_XB; + writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL); + + return dwxgmac2_filter_wait(hw); +} + +static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool ipv6, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + value = readl(ioaddr + XGMAC_PACKET_FILTER); + value |= XGMAC_FILTER_IPFE; + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value); + if (ret) + return ret; + + /* For IPv6 not both SA/DA filters can be active */ + if (ipv6) { + value |= XGMAC_L3PEN0; + value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0); + value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0); + if (sa) { + value |= XGMAC_L3SAM0; + if (inv) + value |= XGMAC_L3SAIM0; + } else { + value |= XGMAC_L3DAM0; + if (inv) + value |= XGMAC_L3DAIM0; + } + } else { + value &= ~XGMAC_L3PEN0; + if (sa) { + value |= XGMAC_L3SAM0; + if (inv) + value |= XGMAC_L3SAIM0; + } else { + value |= XGMAC_L3DAM0; + if (inv) + value |= XGMAC_L3DAIM0; + } + } + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value); + if (ret) + 
return ret; + + if (sa) { + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match); + if (ret) + return ret; + } else { + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match); + if (ret) + return ret; + } + + if (!en) + return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0); + + return 0; +} + +static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no, + bool en, bool udp, bool sa, bool inv, + u32 match) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + int ret; + + value = readl(ioaddr + XGMAC_PACKET_FILTER); + value |= XGMAC_FILTER_IPFE; + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value); + if (ret) + return ret; + + if (udp) { + value |= XGMAC_L4PEN0; + } else { + value &= ~XGMAC_L4PEN0; + } + + value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0); + value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0); + if (sa) { + value |= XGMAC_L4SPM0; + if (inv) + value |= XGMAC_L4SPIM0; + } else { + value |= XGMAC_L4DPM0; + if (inv) + value |= XGMAC_L4DPIM0; + } + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value); + if (ret) + return ret; + + if (sa) { + value = match & XGMAC_L4SP0; + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); + if (ret) + return ret; + } else { + value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0; + + ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value); + if (ret) + return ret; + } + + if (!en) + return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0); + + return 0; +} + +static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, + u32 addr) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(addr, ioaddr + XGMAC_ARP_ADDR); + + value = readl(ioaddr + XGMAC_RX_CONFIG); + if (en) + value |= XGMAC_CONFIG_ARPEN; + else + value &= ~XGMAC_CONFIG_ARPEN; + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl) +{ + u32 ctrl; + + writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA); + + ctrl = (reg << XGMAC_ADDR_SHIFT); + ctrl |= gcl ? 
0 : XGMAC_GCRR; + + writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL); + + ctrl |= XGMAC_SRWO; + writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL); + + return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL, + ctrl, !(ctrl & XGMAC_SRWO), 100, 5000); +} + +static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate) +{ + int i, ret = 0x0; + u32 ctrl; + + ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false); + ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false); + if (ret) + return ret; + + for (i = 0; i < cfg->gcl_size; i++) { + ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true); + if (ret) + return ret; + } + + ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL); + ctrl &= ~XGMAC_PTOV; + ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT; + if (cfg->enable) + ctrl |= XGMAC_EEST | XGMAC_SSWL; + else + ctrl &= ~XGMAC_EEST; + + writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL); + return 0; +} + +static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, + u32 num_rxq, bool enable) +{ + u32 value; + + if (!enable) { + value = readl(ioaddr + XGMAC_FPE_CTRL_STS); + + value &= ~XGMAC_EFPE; + + writel(value, ioaddr + XGMAC_FPE_CTRL_STS); + return; + } + + value = readl(ioaddr + XGMAC_RXQ_CTRL1); + value &= ~XGMAC_RQ; + value |= (num_rxq - 1) << XGMAC_RQ_SHIFT; + writel(value, ioaddr + XGMAC_RXQ_CTRL1); + + value = readl(ioaddr + XGMAC_FPE_CTRL_STS); + value |= XGMAC_EFPE; + writel(value, ioaddr + XGMAC_FPE_CTRL_STS); +} + +const struct stmmac_ops dwxgmac210_ops = { + .core_init = dwxgmac2_core_init, + .set_mac = dwxgmac2_set_mac, + .rx_ipc = dwxgmac2_rx_ipc, + .rx_queue_enable = dwxgmac2_rx_queue_enable, + .rx_queue_prio = dwxgmac2_rx_queue_prio, + .tx_queue_prio = dwxgmac2_tx_queue_prio, + .rx_queue_routing = NULL, + .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, + .config_cbs = dwxgmac2_config_cbs, + .dump_regs = dwxgmac2_dump_regs, + .host_irq_status = dwxgmac2_host_irq_status, + .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, + .flow_ctrl = dwxgmac2_flow_ctrl, + .pmt = dwxgmac2_pmt, + .set_umac_addr = dwxgmac2_set_umac_addr, + .get_umac_addr = dwxgmac2_get_umac_addr, + .set_eee_mode = dwxgmac2_set_eee_mode, + .reset_eee_mode = dwxgmac2_reset_eee_mode, + .set_eee_timer = dwxgmac2_set_eee_timer, + .set_eee_pls = dwxgmac2_set_eee_pls, + .pcs_ctrl_ane = NULL, + .pcs_rane = NULL, + .pcs_get_adv_lp = NULL, + .debug = NULL, + .set_filter = dwxgmac2_set_filter, + .safety_feat_config = dwxgmac3_safety_feat_config, + .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status, + .safety_feat_dump = dwxgmac3_safety_feat_dump, + .set_mac_loopback = dwxgmac2_set_mac_loopback, + .rss_configure = dwxgmac2_rss_configure, + .update_vlan_hash = dwxgmac2_update_vlan_hash, + .rxp_config = dwxgmac3_rxp_config, + .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp, + .flex_pps_config = dwxgmac2_flex_pps_config, + .sarc_configure = dwxgmac2_sarc_configure, + .enable_vlan = dwxgmac2_enable_vlan, + .config_l3_filter = 
dwxgmac2_config_l3_filter, + .config_l4_filter = dwxgmac2_config_l4_filter, + .set_arp_offload = dwxgmac2_set_arp_offload, + .est_configure = dwxgmac3_est_configure, + .fpe_configure = dwxgmac3_fpe_configure, +}; + +static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, + u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue); + if (mode == MTL_QUEUE_AVB) + value |= 0x1 << XGMAC_RXQEN_SHIFT(queue); + else if (mode == MTL_QUEUE_DCB) + value |= 0x2 << XGMAC_RXQEN_SHIFT(queue); + writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0); +} + +const struct stmmac_ops dwxlgmac2_ops = { + .core_init = dwxgmac2_core_init, + .set_mac = dwxgmac2_set_mac, + .rx_ipc = dwxgmac2_rx_ipc, + .rx_queue_enable = dwxlgmac2_rx_queue_enable, + .rx_queue_prio = dwxgmac2_rx_queue_prio, + .tx_queue_prio = dwxgmac2_tx_queue_prio, + .rx_queue_routing = NULL, + .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, + .config_cbs = dwxgmac2_config_cbs, + .dump_regs = dwxgmac2_dump_regs, + .host_irq_status = dwxgmac2_host_irq_status, + .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, + .flow_ctrl = dwxgmac2_flow_ctrl, + .pmt = dwxgmac2_pmt, + .set_umac_addr = dwxgmac2_set_umac_addr, + .get_umac_addr = dwxgmac2_get_umac_addr, + .set_eee_mode = dwxgmac2_set_eee_mode, + .reset_eee_mode = dwxgmac2_reset_eee_mode, + .set_eee_timer = dwxgmac2_set_eee_timer, + .set_eee_pls = dwxgmac2_set_eee_pls, + .pcs_ctrl_ane = NULL, + .pcs_rane = NULL, + .pcs_get_adv_lp = NULL, + .debug = NULL, + .set_filter = dwxgmac2_set_filter, + .safety_feat_config = dwxgmac3_safety_feat_config, + .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status, + .safety_feat_dump = dwxgmac3_safety_feat_dump, + .set_mac_loopback = dwxgmac2_set_mac_loopback, + .rss_configure = dwxgmac2_rss_configure, + .update_vlan_hash = dwxgmac2_update_vlan_hash, + .rxp_config = dwxgmac3_rxp_config, + .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp, + .flex_pps_config = dwxgmac2_flex_pps_config, + .sarc_configure = dwxgmac2_sarc_configure, + .enable_vlan = dwxgmac2_enable_vlan, + .config_l3_filter = dwxgmac2_config_l3_filter, + .config_l4_filter = dwxgmac2_config_l4_filter, + .set_arp_offload = dwxgmac2_set_arp_offload, + .est_configure = dwxgmac3_est_configure, + .fpe_configure = dwxgmac3_fpe_configure, +}; + +int dwxgmac2_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tXGMAC2\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = 0; + mac->link.speed10 = XGMAC_CONFIG_SS_10_MII; + mac->link.speed100 = XGMAC_CONFIG_SS_100_MII; + mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII; + mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII; + mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500; + mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000; + mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000; + mac->link.speed_mask = XGMAC_CONFIG_SS_MASK; + + mac->mii.addr = XGMAC_MDIO_ADDR; + mac->mii.data = XGMAC_MDIO_DATA; + mac->mii.addr_shift = 16; + 
mac->mii.addr_mask = GENMASK(20, 16); + mac->mii.reg_shift = 0; + mac->mii.reg_mask = GENMASK(15, 0); + mac->mii.clk_csr_shift = 19; + mac->mii.clk_csr_mask = GENMASK(21, 19); + + return 0; +} + +int dwxlgmac2_setup(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + dev_info(priv->device, "\tXLGMAC\n"); + + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->link.duplex = 0; + mac->link.speed1000 = XLGMAC_CONFIG_SS_1000; + mac->link.speed2500 = XLGMAC_CONFIG_SS_2500; + mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G; + mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G; + mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G; + mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G; + mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G; + mac->link.speed_mask = XLGMAC_CONFIG_SS; + + mac->mii.addr = XGMAC_MDIO_ADDR; + mac->mii.data = XGMAC_MDIO_DATA; + mac->mii.addr_shift = 16; + mac->mii.addr_mask = GENMASK(20, 16); + mac->mii.reg_shift = 0; + mac->mii.reg_mask = GENMASK(15, 0); + mac->mii.clk_csr_shift = 19; + mac->mii.clk_csr_mask = GENMASK(21, 19); + + return 0; +} diff --git a/devices/stmmac/dwxgmac2_descs-6.1-ethercat.c b/devices/stmmac/dwxgmac2_descs-6.1-ethercat.c new file mode 100644 index 00000000..a8f64aaf --- /dev/null +++ b/devices/stmmac/dwxgmac2_descs-6.1-ethercat.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. + */ + +#include +#include "common-6.1-ethercat.h" +#include "dwxgmac2-6.1-ethercat.h" + +static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + int ret = tx_done; + + if (unlikely(tdes3 & XGMAC_TDES3_OWN)) + return tx_dma_own; + if (likely(!(tdes3 & XGMAC_TDES3_LD))) + return tx_not_ls; + + return ret; +} + +static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + unsigned int rdes3 = le32_to_cpu(p->des3); + + if (unlikely(rdes3 & XGMAC_RDES3_OWN)) + return dma_own; + if (unlikely(rdes3 & XGMAC_RDES3_CTXT)) + return discard_frame; + if (likely(!(rdes3 & XGMAC_RDES3_LD))) + return rx_not_ls; + if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD))) + return discard_frame; + + return good_frame; +} + +static int dwxgmac2_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L); +} + +static int dwxgmac2_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0; +} + +static void dwxgmac2_set_tx_owner(struct dma_desc *p) +{ + p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN); +} + +static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN); + + if (!disable_rx_ic) + p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC); +} + +static int dwxgmac2_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0; +} + +static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe) +{ + return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL); +} + +static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE); +} + +static int 
dwxgmac2_get_tx_timestamp_status(struct dma_desc *p) +{ + return 0; /* Not supported */ +} + +static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns = 0; + + ns += le32_to_cpu(p->des1) * 1000000000ULL; + ns += le32_to_cpu(p->des0); + + *ts = ns; +} + +static int dwxgmac2_rx_check_timestamp(void *desc) +{ + struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes3 = le32_to_cpu(p->des3); + bool desc_valid, ts_valid; + + dma_rmb(); + + desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT); + ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA); + + if (likely(desc_valid && ts_valid)) { + if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + return -EINVAL; + return 0; + } + + return -EINVAL; +} + +static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes3 = le32_to_cpu(p->des3); + int ret = -EBUSY; + + if (likely(rdes3 & XGMAC_RDES3_CDA)) + ret = dwxgmac2_rx_check_timestamp(next_desc); + + return !ret; +} + +static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end, int bfsize) +{ + dwxgmac2_set_rx_owner(p, disable_rx_ic); +} + +static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L); + + tdes3 |= tot_pkt_len & XGMAC_TDES3_FL; + if (is_fs) + tdes3 |= XGMAC_TDES3_FD; + else + tdes3 &= ~XGMAC_TDES3_FD; + + if (csum_flag) + tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT; + else + tdes3 &= ~XGMAC_TDES3_CIC; + + if (ls) + tdes3 |= XGMAC_TDES3_LD; + else + tdes3 &= ~XGMAC_TDES3_LD; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= XGMAC_TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, + int len1, int len2, bool tx_own, + bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + if (len1) + p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L); + if (len2) + p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) & + XGMAC_TDES2_B2L); + if (is_fs) { + tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE; + tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) & + XGMAC_TDES3_THL; + tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL; + } else { + tdes3 &= ~XGMAC_TDES3_FD; + } + + if (ls) + tdes3 |= XGMAC_TDES3_LD; + else + tdes3 &= ~XGMAC_TDES3_LD; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= XGMAC_TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. 
+ */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwxgmac2_set_tx_ic(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC); +} + +static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = cpu_to_le32(mss); + p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV); +} + +static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des0 = cpu_to_le32(lower_32_bits(addr)); + p->des1 = cpu_to_le32(upper_32_bits(addr)); +} + +static void dwxgmac2_clear(struct dma_desc *p) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash, + enum pkt_hash_types *type) +{ + unsigned int rdes3 = le32_to_cpu(p->des3); + u32 ptype; + + if (rdes3 & XGMAC_RDES3_RSV) { + ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT; + + switch (ptype) { + case XGMAC_L34T_IP4TCP: + case XGMAC_L34T_IP4UDP: + case XGMAC_L34T_IP6TCP: + case XGMAC_L34T_IP6UDP: + *type = PKT_HASH_TYPE_L4; + break; + default: + *type = PKT_HASH_TYPE_L3; + break; + } + + *hash = le32_to_cpu(p->des1); + return 0; + } + + return -EINVAL; +} + +static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) +{ + if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T) + *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; +} + +static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_valid) +{ + p->des2 = cpu_to_le32(lower_32_bits(addr)); + p->des3 = cpu_to_le32(upper_32_bits(addr)); +} + +static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type) +{ + sarc_type <<= XGMAC_TDES3_SAIC_SHIFT; + + p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC); +} + +static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + + /* Inner VLAN */ + if (inner_type) { + u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT; + + des &= XGMAC_TDES2_IVT; + p->des2 = cpu_to_le32(des); + + des = inner_type << XGMAC_TDES3_IVTIR_SHIFT; + des &= XGMAC_TDES3_IVTIR; + p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV); + } + + /* Outer VLAN */ + p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT); + p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV); + + p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT); +} + +static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type) +{ + type <<= XGMAC_TDES2_VTIR_SHIFT; + p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR); +} + +static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec) +{ + p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV); + p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT); + p->des6 = 0; + p->des7 = 0; +} + +const struct stmmac_desc_ops dwxgmac210_desc_ops = { + .tx_status = dwxgmac2_get_tx_status, + .rx_status = dwxgmac2_get_rx_status, + .get_tx_len = dwxgmac2_get_tx_len, + .get_tx_owner = dwxgmac2_get_tx_owner, + .set_tx_owner = dwxgmac2_set_tx_owner, + .set_rx_owner = dwxgmac2_set_rx_owner, + .get_tx_ls = dwxgmac2_get_tx_ls, + .get_rx_frame_len = dwxgmac2_get_rx_frame_len, + .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp, + .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status, + .get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status, + .get_timestamp = dwxgmac2_get_timestamp, + .set_tx_ic = dwxgmac2_set_tx_ic, + .prepare_tx_desc = dwxgmac2_prepare_tx_desc, + .prepare_tso_tx_desc = 
dwxgmac2_prepare_tso_tx_desc, + .release_tx_desc = dwxgmac2_release_tx_desc, + .init_rx_desc = dwxgmac2_init_rx_desc, + .init_tx_desc = dwxgmac2_init_tx_desc, + .set_mss = dwxgmac2_set_mss, + .set_addr = dwxgmac2_set_addr, + .clear = dwxgmac2_clear, + .get_rx_hash = dwxgmac2_get_rx_hash, + .get_rx_header_len = dwxgmac2_get_rx_header_len, + .set_sec_addr = dwxgmac2_set_sec_addr, + .set_sarc = dwxgmac2_set_sarc, + .set_vlan_tag = dwxgmac2_set_vlan_tag, + .set_vlan = dwxgmac2_set_vlan, + .set_tbs = dwxgmac2_set_tbs, +}; diff --git a/devices/stmmac/dwxgmac2_descs-6.1-orig.c b/devices/stmmac/dwxgmac2_descs-6.1-orig.c new file mode 100644 index 00000000..b1f0c398 --- /dev/null +++ b/devices/stmmac/dwxgmac2_descs-6.1-orig.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. + */ + +#include +#include "common.h" +#include "dwxgmac2.h" + +static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + int ret = tx_done; + + if (unlikely(tdes3 & XGMAC_TDES3_OWN)) + return tx_dma_own; + if (likely(!(tdes3 & XGMAC_TDES3_LD))) + return tx_not_ls; + + return ret; +} + +static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + unsigned int rdes3 = le32_to_cpu(p->des3); + + if (unlikely(rdes3 & XGMAC_RDES3_OWN)) + return dma_own; + if (unlikely(rdes3 & XGMAC_RDES3_CTXT)) + return discard_frame; + if (likely(!(rdes3 & XGMAC_RDES3_LD))) + return rx_not_ls; + if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD))) + return discard_frame; + + return good_frame; +} + +static int dwxgmac2_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L); +} + +static int dwxgmac2_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0; +} + +static void dwxgmac2_set_tx_owner(struct dma_desc *p) +{ + p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN); +} + +static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN); + + if (!disable_rx_ic) + p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC); +} + +static int dwxgmac2_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0; +} + +static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe) +{ + return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL); +} + +static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE); +} + +static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p) +{ + return 0; /* Not supported */ +} + +static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns = 0; + + ns += le32_to_cpu(p->des1) * 1000000000ULL; + ns += le32_to_cpu(p->des0); + + *ts = ns; +} + +static int dwxgmac2_rx_check_timestamp(void *desc) +{ + struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes3 = le32_to_cpu(p->des3); + bool desc_valid, ts_valid; + + dma_rmb(); + + desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT); + ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA); + + if (likely(desc_valid && ts_valid)) { + if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + return -EINVAL; + return 0; + } + + return -EINVAL; +} + +static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) +{ + struct 
dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes3 = le32_to_cpu(p->des3); + int ret = -EBUSY; + + if (likely(rdes3 & XGMAC_RDES3_CDA)) + ret = dwxgmac2_rx_check_timestamp(next_desc); + + return !ret; +} + +static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end, int bfsize) +{ + dwxgmac2_set_rx_owner(p, disable_rx_ic); +} + +static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L); + + tdes3 |= tot_pkt_len & XGMAC_TDES3_FL; + if (is_fs) + tdes3 |= XGMAC_TDES3_FD; + else + tdes3 &= ~XGMAC_TDES3_FD; + + if (csum_flag) + tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT; + else + tdes3 &= ~XGMAC_TDES3_CIC; + + if (ls) + tdes3 |= XGMAC_TDES3_LD; + else + tdes3 &= ~XGMAC_TDES3_LD; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= XGMAC_TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. + */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs, + int len1, int len2, bool tx_own, + bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen) +{ + unsigned int tdes3 = le32_to_cpu(p->des3); + + if (len1) + p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L); + if (len2) + p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) & + XGMAC_TDES2_B2L); + if (is_fs) { + tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE; + tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) & + XGMAC_TDES3_THL; + tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL; + } else { + tdes3 &= ~XGMAC_TDES3_FD; + } + + if (ls) + tdes3 |= XGMAC_TDES3_LD; + else + tdes3 &= ~XGMAC_TDES3_LD; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes3 |= XGMAC_TDES3_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. 
+ */ + dma_wmb(); + + p->des3 = cpu_to_le32(tdes3); +} + +static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static void dwxgmac2_set_tx_ic(struct dma_desc *p) +{ + p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC); +} + +static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = cpu_to_le32(mss); + p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV); +} + +static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des0 = cpu_to_le32(lower_32_bits(addr)); + p->des1 = cpu_to_le32(upper_32_bits(addr)); +} + +static void dwxgmac2_clear(struct dma_desc *p) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + +static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash, + enum pkt_hash_types *type) +{ + unsigned int rdes3 = le32_to_cpu(p->des3); + u32 ptype; + + if (rdes3 & XGMAC_RDES3_RSV) { + ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT; + + switch (ptype) { + case XGMAC_L34T_IP4TCP: + case XGMAC_L34T_IP4UDP: + case XGMAC_L34T_IP6TCP: + case XGMAC_L34T_IP6UDP: + *type = PKT_HASH_TYPE_L4; + break; + default: + *type = PKT_HASH_TYPE_L3; + break; + } + + *hash = le32_to_cpu(p->des1); + return 0; + } + + return -EINVAL; +} + +static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) +{ + if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T) + *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; +} + +static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_valid) +{ + p->des2 = cpu_to_le32(lower_32_bits(addr)); + p->des3 = cpu_to_le32(upper_32_bits(addr)); +} + +static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type) +{ + sarc_type <<= XGMAC_TDES3_SAIC_SHIFT; + + p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC); +} + +static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + + /* Inner VLAN */ + if (inner_type) { + u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT; + + des &= XGMAC_TDES2_IVT; + p->des2 = cpu_to_le32(des); + + des = inner_type << XGMAC_TDES3_IVTIR_SHIFT; + des &= XGMAC_TDES3_IVTIR; + p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV); + } + + /* Outer VLAN */ + p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT); + p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV); + + p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT); +} + +static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type) +{ + type <<= XGMAC_TDES2_VTIR_SHIFT; + p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR); +} + +static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec) +{ + p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV); + p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT); + p->des6 = 0; + p->des7 = 0; +} + +const struct stmmac_desc_ops dwxgmac210_desc_ops = { + .tx_status = dwxgmac2_get_tx_status, + .rx_status = dwxgmac2_get_rx_status, + .get_tx_len = dwxgmac2_get_tx_len, + .get_tx_owner = dwxgmac2_get_tx_owner, + .set_tx_owner = dwxgmac2_set_tx_owner, + .set_rx_owner = dwxgmac2_set_rx_owner, + .get_tx_ls = dwxgmac2_get_tx_ls, + .get_rx_frame_len = dwxgmac2_get_rx_frame_len, + .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp, + .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status, + .get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status, + .get_timestamp = dwxgmac2_get_timestamp, + .set_tx_ic = dwxgmac2_set_tx_ic, + .prepare_tx_desc = dwxgmac2_prepare_tx_desc, + .prepare_tso_tx_desc = 
dwxgmac2_prepare_tso_tx_desc, + .release_tx_desc = dwxgmac2_release_tx_desc, + .init_rx_desc = dwxgmac2_init_rx_desc, + .init_tx_desc = dwxgmac2_init_tx_desc, + .set_mss = dwxgmac2_set_mss, + .set_addr = dwxgmac2_set_addr, + .clear = dwxgmac2_clear, + .get_rx_hash = dwxgmac2_get_rx_hash, + .get_rx_header_len = dwxgmac2_get_rx_header_len, + .set_sec_addr = dwxgmac2_set_sec_addr, + .set_sarc = dwxgmac2_set_sarc, + .set_vlan_tag = dwxgmac2_set_vlan_tag, + .set_vlan = dwxgmac2_set_vlan, + .set_tbs = dwxgmac2_set_tbs, +}; diff --git a/devices/stmmac/dwxgmac2_dma-6.1-ethercat.c b/devices/stmmac/dwxgmac2_dma-6.1-ethercat.c new file mode 100644 index 00000000..67ed03a2 --- /dev/null +++ b/devices/stmmac/dwxgmac2_dma-6.1-ethercat.c @@ -0,0 +1,582 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. + */ + +#include +#include "stmmac-6.1-ethercat.h" +#include "dwxgmac2-6.1-ethercat.h" + +static int dwxgmac2_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_DMA_MODE); + + /* DMA SW reset */ + writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE); + + return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value, + !(value & XGMAC_SWR), 0, 100000); +} + +static void dwxgmac2_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); + + if (dma_cfg->aal) + value |= XGMAC_AAL; + + if (dma_cfg->eame) + value |= XGMAC_EAME; + + writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); +} + +static void dwxgmac2_dma_init_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + + if (dma_cfg->pblx8) + value |= XGMAC_PBLx8; + + writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan) +{ + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RxPBL; + value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + + writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan)); + writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan)); +} + +static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan) +{ + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value &= ~XGMAC_TxPBL; + value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL; + value |= XGMAC_OSP; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan)); + writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan)); +} + +static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); + int i; + + if (axi->axi_lpi_en) + value |= XGMAC_EN_LPI; + if (axi->axi_xit_frm) + value |= XGMAC_LPI_XIT_PKT; + + value &= ~XGMAC_WR_OSR_LMT; + value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) & + XGMAC_WR_OSR_LMT; + + value &= ~XGMAC_RD_OSR_LMT; + value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) & + XGMAC_RD_OSR_LMT; + + if (!axi->axi_fb) + value |= XGMAC_UNDEF; + + value &= ~XGMAC_BLEN; + for (i = 0; i < 
AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= XGMAC_BLEN256; + break; + case 128: + value |= XGMAC_BLEN128; + break; + case 64: + value |= XGMAC_BLEN64; + break; + case 32: + value |= XGMAC_BLEN32; + break; + case 16: + value |= XGMAC_BLEN16; + break; + case 8: + value |= XGMAC_BLEN8; + break; + case 4: + value |= XGMAC_BLEN4; + break; + } + } + + writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); + writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL); + writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL); +} + +static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); + unsigned int rqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + value |= XGMAC_RSF; + } else { + value &= ~XGMAC_RSF; + value &= ~XGMAC_RTC; + + if (mode <= 64) + value |= 0x0 << XGMAC_RTC_SHIFT; + else if (mode <= 96) + value |= 0x2 << XGMAC_RTC_SHIFT; + else + value |= 0x3 << XGMAC_RTC_SHIFT; + } + + value &= ~XGMAC_RQS; + value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS; + + if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) { + u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel)); + unsigned int rfd, rfa; + + value |= XGMAC_EHFC; + + /* Set Threshold for Activating Flow Control to min 2 frames, + * i.e. 1500 * 2 = 3000 bytes. + * + * Set Threshold for Deactivating Flow Control to min 1 frame, + * i.e. 1500 bytes. + */ + switch (fifosz) { + case 4096: + /* This violates the above formula because of FIFO size + * limit therefore overflow may occur in spite of this. 
+ */ + rfd = 0x03; /* Full-2.5K */ + rfa = 0x01; /* Full-1.5K */ + break; + + default: + rfd = 0x07; /* Full-4.5K */ + rfa = 0x04; /* Full-3K */ + break; + } + + flow &= ~XGMAC_RFD; + flow |= rfd << XGMAC_RFD_SHIFT; + + flow &= ~XGMAC_RFA; + flow |= rfa << XGMAC_RFA_SHIFT; + + writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel)); + } + + writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); + + /* Enable MTL RX overflow */ + value = readl(ioaddr + XGMAC_MTL_QINTEN(channel)); + writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel)); +} + +static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + unsigned int tqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + value |= XGMAC_TSF; + } else { + value &= ~XGMAC_TSF; + value &= ~XGMAC_TTC; + + if (mode <= 64) + value |= 0x0 << XGMAC_TTC_SHIFT; + else if (mode <= 96) + value |= 0x2 << XGMAC_TTC_SHIFT; + else if (mode <= 128) + value |= 0x3 << XGMAC_TTC_SHIFT; + else if (mode <= 192) + value |= 0x4 << XGMAC_TTC_SHIFT; + else if (mode <= 256) + value |= 0x5 << XGMAC_TTC_SHIFT; + else if (mode <= 384) + value |= 0x6 << XGMAC_TTC_SHIFT; + else + value |= 0x7 << XGMAC_TTC_SHIFT; + } + + /* Use static TC to Queue mapping */ + value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP; + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) + value |= 0x2 << XGMAC_TXQEN_SHIFT; + else + value |= 0x1 << XGMAC_TXQEN_SHIFT; + + value &= ~XGMAC_TQS; + value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS; + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + +static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan, + bool rx, bool tx) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + + if (rx) + value |= XGMAC_DMA_INT_DEFAULT_RX; + if (tx) + value |= XGMAC_DMA_INT_DEFAULT_TX; + + writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan, + bool rx, bool tx) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + + if (rx) + value &= ~XGMAC_DMA_INT_DEFAULT_RX; + if (tx) + value &= ~XGMAC_DMA_INT_DEFAULT_TX; + + writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value |= XGMAC_TXST; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_TX_CONFIG); + value |= XGMAC_CONFIG_TE; + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value &= ~XGMAC_TXST; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_TX_CONFIG); + value &= ~XGMAC_CONFIG_TE; + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value |= XGMAC_RXST; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_RX_CONFIG); + value |= XGMAC_CONFIG_RE; + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RXST; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); +} + +static int 
dwxgmac2_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, + u32 dir) +{ + u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + int ret = 0; + + if (dir == DMA_DIR_RX) + intr_status &= XGMAC_DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= XGMAC_DMA_STATUS_MSK_TX; + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & XGMAC_AIS)) { + if (unlikely(intr_status & XGMAC_RBU)) { + x->rx_buf_unav_irq++; + ret |= handle_rx; + } + if (unlikely(intr_status & XGMAC_TPS)) { + x->tx_process_stopped_irq++; + ret |= tx_hard_error; + } + if (unlikely(intr_status & XGMAC_FBE)) { + x->fatal_bus_error_irq++; + ret |= tx_hard_error; + } + } + + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & XGMAC_NIS)) { + x->normal_irq_n++; + + if (likely(intr_status & XGMAC_RI)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { + x->tx_normal_irq_n++; + ret |= handle_tx; + } + } + + /* Clear interrupts */ + writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan)); + + return ret; +} + +static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap; + + /* MAC HW feature 0 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0); + dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27; + dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16; + dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14; + dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; + dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; + dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; + dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10); + dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; + dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; + dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; + dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6; + dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4; + dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1; + + /* MAC HW feature 1 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); + dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27; + dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24; + dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20; + dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18; + dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17; + + dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14; + switch (dma_cap->addr64) { + case 0: + dma_cap->addr64 = 32; + break; + case 1: + dma_cap->addr64 = 40; + break; + case 2: + dma_cap->addr64 = 48; + break; + default: + dma_cap->addr64 = 32; + break; + } + + dma_cap->tx_fifo_size = + 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6); + dma_cap->rx_fifo_size = + 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0); + + /* MAC HW feature 2 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2); + dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24; + dma_cap->number_tx_channel = + ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1; + dma_cap->number_rx_channel = + ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1; + dma_cap->number_tx_queues = + ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1; + dma_cap->number_rx_queues = + ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1; + + /* MAC HW feature 3 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3); + dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27; + dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26; + 
dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23; + dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20; + dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19; + dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14; + dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13; + dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11; + dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9; + dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; + + return 0; +} + +static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) +{ + writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue)); +} + +static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan)); +} + +static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan)); +} + +static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan) +{ + writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan)); +} + +static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan) +{ + writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan)); +} + +static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + if (en) + value |= XGMAC_TSE; + else + value &= ~XGMAC_TSE; + + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); +} + +static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL); + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) { + value |= 0x2 << XGMAC_TXQEN_SHIFT; + writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); + } else { + value |= 0x1 << XGMAC_TXQEN_SHIFT; + writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL); + } + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + +static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RBSZ; + value |= bfsize << XGMAC_RBSZ_SHIFT; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); +} + +static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_RX_CONFIG); + + value &= ~XGMAC_CONFIG_HDSMS; + value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */ + writel(value, ioaddr + XGMAC_RX_CONFIG); + + value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + if (en) + value |= XGMAC_SPH; + else + value &= ~XGMAC_SPH; + writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan)); +} + +static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + if (en) + value |= XGMAC_EDSE; + else + value &= ~XGMAC_EDSE; + + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE; + if (en && !value) + return -EIO; + + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0); + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1); + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2); + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3); + return 0; +} + +const struct stmmac_dma_ops dwxgmac210_dma_ops = { + .reset = dwxgmac2_dma_reset, + .init = dwxgmac2_dma_init, + .init_chan = dwxgmac2_dma_init_chan, + .init_rx_chan = dwxgmac2_dma_init_rx_chan, + .init_tx_chan = 
dwxgmac2_dma_init_tx_chan, + .axi = dwxgmac2_dma_axi, + .dump_regs = dwxgmac2_dma_dump_regs, + .dma_rx_mode = dwxgmac2_dma_rx_mode, + .dma_tx_mode = dwxgmac2_dma_tx_mode, + .enable_dma_irq = dwxgmac2_enable_dma_irq, + .disable_dma_irq = dwxgmac2_disable_dma_irq, + .start_tx = dwxgmac2_dma_start_tx, + .stop_tx = dwxgmac2_dma_stop_tx, + .start_rx = dwxgmac2_dma_start_rx, + .stop_rx = dwxgmac2_dma_stop_rx, + .dma_interrupt = dwxgmac2_dma_interrupt, + .get_hw_feature = dwxgmac2_get_hw_feature, + .rx_watchdog = dwxgmac2_rx_watchdog, + .set_rx_ring_len = dwxgmac2_set_rx_ring_len, + .set_tx_ring_len = dwxgmac2_set_tx_ring_len, + .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr, + .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr, + .enable_tso = dwxgmac2_enable_tso, + .qmode = dwxgmac2_qmode, + .set_bfsize = dwxgmac2_set_bfsize, + .enable_sph = dwxgmac2_enable_sph, + .enable_tbs = dwxgmac2_enable_tbs, +}; diff --git a/devices/stmmac/dwxgmac2_dma-6.1-orig.c b/devices/stmmac/dwxgmac2_dma-6.1-orig.c new file mode 100644 index 00000000..5e98355f --- /dev/null +++ b/devices/stmmac/dwxgmac2_dma-6.1-orig.c @@ -0,0 +1,582 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac XGMAC support. + */ + +#include +#include "stmmac.h" +#include "dwxgmac2.h" + +static int dwxgmac2_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_DMA_MODE); + + /* DMA SW reset */ + writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE); + + return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value, + !(value & XGMAC_SWR), 0, 100000); +} + +static void dwxgmac2_dma_init(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, int atds) +{ + u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); + + if (dma_cfg->aal) + value |= XGMAC_AAL; + + if (dma_cfg->eame) + value |= XGMAC_EAME; + + writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); +} + +static void dwxgmac2_dma_init_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + + if (dma_cfg->pblx8) + value |= XGMAC_PBLx8; + + writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan) +{ + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RxPBL; + value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + + writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan)); + writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan)); +} + +static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan) +{ + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value &= ~XGMAC_TxPBL; + value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL; + value |= XGMAC_OSP; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan)); + writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan)); +} + +static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); + int i; + + if (axi->axi_lpi_en) + value |= XGMAC_EN_LPI; + if (axi->axi_xit_frm) + value |= 
XGMAC_LPI_XIT_PKT; + + value &= ~XGMAC_WR_OSR_LMT; + value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) & + XGMAC_WR_OSR_LMT; + + value &= ~XGMAC_RD_OSR_LMT; + value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) & + XGMAC_RD_OSR_LMT; + + if (!axi->axi_fb) + value |= XGMAC_UNDEF; + + value &= ~XGMAC_BLEN; + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= XGMAC_BLEN256; + break; + case 128: + value |= XGMAC_BLEN128; + break; + case 64: + value |= XGMAC_BLEN64; + break; + case 32: + value |= XGMAC_BLEN32; + break; + case 16: + value |= XGMAC_BLEN16; + break; + case 8: + value |= XGMAC_BLEN8; + break; + case 4: + value |= XGMAC_BLEN4; + break; + } + } + + writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); + writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL); + writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL); +} + +static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space) +{ + int i; + + for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++) + reg_space[i] = readl(ioaddr + i * 4); +} + +static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); + unsigned int rqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + value |= XGMAC_RSF; + } else { + value &= ~XGMAC_RSF; + value &= ~XGMAC_RTC; + + if (mode <= 64) + value |= 0x0 << XGMAC_RTC_SHIFT; + else if (mode <= 96) + value |= 0x2 << XGMAC_RTC_SHIFT; + else + value |= 0x3 << XGMAC_RTC_SHIFT; + } + + value &= ~XGMAC_RQS; + value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS; + + if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) { + u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel)); + unsigned int rfd, rfa; + + value |= XGMAC_EHFC; + + /* Set Threshold for Activating Flow Control to min 2 frames, + * i.e. 1500 * 2 = 3000 bytes. + * + * Set Threshold for Deactivating Flow Control to min 1 frame, + * i.e. 1500 bytes. + */ + switch (fifosz) { + case 4096: + /* This violates the above formula because of FIFO size + * limit therefore overflow may occur in spite of this. 
+ */ + rfd = 0x03; /* Full-2.5K */ + rfa = 0x01; /* Full-1.5K */ + break; + + default: + rfd = 0x07; /* Full-4.5K */ + rfa = 0x04; /* Full-3K */ + break; + } + + flow &= ~XGMAC_RFD; + flow |= rfd << XGMAC_RFD_SHIFT; + + flow &= ~XGMAC_RFA; + flow |= rfa << XGMAC_RFA_SHIFT; + + writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel)); + } + + writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); + + /* Enable MTL RX overflow */ + value = readl(ioaddr + XGMAC_MTL_QINTEN(channel)); + writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel)); +} + +static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + unsigned int tqs = fifosz / 256 - 1; + + if (mode == SF_DMA_MODE) { + value |= XGMAC_TSF; + } else { + value &= ~XGMAC_TSF; + value &= ~XGMAC_TTC; + + if (mode <= 64) + value |= 0x0 << XGMAC_TTC_SHIFT; + else if (mode <= 96) + value |= 0x2 << XGMAC_TTC_SHIFT; + else if (mode <= 128) + value |= 0x3 << XGMAC_TTC_SHIFT; + else if (mode <= 192) + value |= 0x4 << XGMAC_TTC_SHIFT; + else if (mode <= 256) + value |= 0x5 << XGMAC_TTC_SHIFT; + else if (mode <= 384) + value |= 0x6 << XGMAC_TTC_SHIFT; + else + value |= 0x7 << XGMAC_TTC_SHIFT; + } + + /* Use static TC to Queue mapping */ + value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP; + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) + value |= 0x2 << XGMAC_TXQEN_SHIFT; + else + value |= 0x1 << XGMAC_TXQEN_SHIFT; + + value &= ~XGMAC_TQS; + value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS; + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + +static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan, + bool rx, bool tx) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + + if (rx) + value |= XGMAC_DMA_INT_DEFAULT_RX; + if (tx) + value |= XGMAC_DMA_INT_DEFAULT_TX; + + writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan, + bool rx, bool tx) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + + if (rx) + value &= ~XGMAC_DMA_INT_DEFAULT_RX; + if (tx) + value &= ~XGMAC_DMA_INT_DEFAULT_TX; + + writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan)); +} + +static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value |= XGMAC_TXST; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_TX_CONFIG); + value |= XGMAC_CONFIG_TE; + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + value &= ~XGMAC_TXST; + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_TX_CONFIG); + value &= ~XGMAC_CONFIG_TE; + writel(value, ioaddr + XGMAC_TX_CONFIG); +} + +static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value |= XGMAC_RXST; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_RX_CONFIG); + value |= XGMAC_CONFIG_RE; + writel(value, ioaddr + XGMAC_RX_CONFIG); +} + +static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RXST; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); +} + +static int 
dwxgmac2_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, + u32 dir) +{ + u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); + u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); + int ret = 0; + + if (dir == DMA_DIR_RX) + intr_status &= XGMAC_DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= XGMAC_DMA_STATUS_MSK_TX; + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & XGMAC_AIS)) { + if (unlikely(intr_status & XGMAC_RBU)) { + x->rx_buf_unav_irq++; + ret |= handle_rx; + } + if (unlikely(intr_status & XGMAC_TPS)) { + x->tx_process_stopped_irq++; + ret |= tx_hard_error; + } + if (unlikely(intr_status & XGMAC_FBE)) { + x->fatal_bus_error_irq++; + ret |= tx_hard_error; + } + } + + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & XGMAC_NIS)) { + x->normal_irq_n++; + + if (likely(intr_status & XGMAC_RI)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { + x->tx_normal_irq_n++; + ret |= handle_tx; + } + } + + /* Clear interrupts */ + writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan)); + + return ret; +} + +static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) +{ + u32 hw_cap; + + /* MAC HW feature 0 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0); + dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27; + dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16; + dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14; + dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; + dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; + dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; + dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10); + dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; + dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; + dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; + dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6; + dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4; + dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1; + + /* MAC HW feature 1 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); + dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27; + dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24; + dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20; + dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18; + dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17; + + dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14; + switch (dma_cap->addr64) { + case 0: + dma_cap->addr64 = 32; + break; + case 1: + dma_cap->addr64 = 40; + break; + case 2: + dma_cap->addr64 = 48; + break; + default: + dma_cap->addr64 = 32; + break; + } + + dma_cap->tx_fifo_size = + 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6); + dma_cap->rx_fifo_size = + 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0); + + /* MAC HW feature 2 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2); + dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24; + dma_cap->number_tx_channel = + ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1; + dma_cap->number_rx_channel = + ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1; + dma_cap->number_tx_queues = + ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1; + dma_cap->number_rx_queues = + ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1; + + /* MAC HW feature 3 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3); + dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27; + dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26; + 
dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23; + dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20; + dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19; + dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14; + dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13; + dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11; + dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9; + dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; + + return 0; +} + +static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) +{ + writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue)); +} + +static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan)); +} + +static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) +{ + writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan)); +} + +static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan) +{ + writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan)); +} + +static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan) +{ + writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan)); +} + +static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + if (en) + value |= XGMAC_TSE; + else + value &= ~XGMAC_TSE; + + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); +} + +static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL); + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) { + value |= 0x2 << XGMAC_TXQEN_SHIFT; + writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); + } else { + value |= 0x1 << XGMAC_TXQEN_SHIFT; + writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL); + } + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + +static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value; + + value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); + value &= ~XGMAC_RBSZ; + value |= bfsize << XGMAC_RBSZ_SHIFT; + writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); +} + +static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_RX_CONFIG); + + value &= ~XGMAC_CONFIG_HDSMS; + value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */ + writel(value, ioaddr + XGMAC_RX_CONFIG); + + value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan)); + if (en) + value |= XGMAC_SPH; + else + value &= ~XGMAC_SPH; + writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan)); +} + +static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + if (en) + value |= XGMAC_EDSE; + else + value &= ~XGMAC_EDSE; + + writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); + + value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE; + if (en && !value) + return -EIO; + + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0); + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1); + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2); + writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3); + return 0; +} + +const struct stmmac_dma_ops dwxgmac210_dma_ops = { + .reset = dwxgmac2_dma_reset, + .init = dwxgmac2_dma_init, + .init_chan = dwxgmac2_dma_init_chan, + .init_rx_chan = dwxgmac2_dma_init_rx_chan, + .init_tx_chan = 
dwxgmac2_dma_init_tx_chan, + .axi = dwxgmac2_dma_axi, + .dump_regs = dwxgmac2_dma_dump_regs, + .dma_rx_mode = dwxgmac2_dma_rx_mode, + .dma_tx_mode = dwxgmac2_dma_tx_mode, + .enable_dma_irq = dwxgmac2_enable_dma_irq, + .disable_dma_irq = dwxgmac2_disable_dma_irq, + .start_tx = dwxgmac2_dma_start_tx, + .stop_tx = dwxgmac2_dma_stop_tx, + .start_rx = dwxgmac2_dma_start_rx, + .stop_rx = dwxgmac2_dma_stop_rx, + .dma_interrupt = dwxgmac2_dma_interrupt, + .get_hw_feature = dwxgmac2_get_hw_feature, + .rx_watchdog = dwxgmac2_rx_watchdog, + .set_rx_ring_len = dwxgmac2_set_rx_ring_len, + .set_tx_ring_len = dwxgmac2_set_tx_ring_len, + .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr, + .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr, + .enable_tso = dwxgmac2_enable_tso, + .qmode = dwxgmac2_qmode, + .set_bfsize = dwxgmac2_set_bfsize, + .enable_sph = dwxgmac2_enable_sph, + .enable_tbs = dwxgmac2_enable_tbs, +}; diff --git a/devices/stmmac/dwxlgmac2-6.1-ethercat.h b/devices/stmmac/dwxlgmac2-6.1-ethercat.h new file mode 100644 index 00000000..726090d4 --- /dev/null +++ b/devices/stmmac/dwxlgmac2-6.1-ethercat.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare XLGMAC definitions. + */ + +#ifndef __STMMAC_DWXLGMAC2_H__ +#define __STMMAC_DWXLGMAC2_H__ + +/* MAC Registers */ +#define XLGMAC_CONFIG_SS GENMASK(30, 28) +#define XLGMAC_CONFIG_SS_SHIFT 28 +#define XLGMAC_CONFIG_SS_40G (0x0 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_25G (0x1 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_50G (0x2 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_100G (0x3 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_10G (0x4 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_2500 (0x6 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_1000 (0x7 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_RXQ_ENABLE_CTRL0 0x00000140 + +#endif /* __STMMAC_DWXLGMAC2_H__ */ diff --git a/devices/stmmac/dwxlgmac2-6.1-orig.h b/devices/stmmac/dwxlgmac2-6.1-orig.h new file mode 100644 index 00000000..726090d4 --- /dev/null +++ b/devices/stmmac/dwxlgmac2-6.1-orig.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare XLGMAC definitions. + */ + +#ifndef __STMMAC_DWXLGMAC2_H__ +#define __STMMAC_DWXLGMAC2_H__ + +/* MAC Registers */ +#define XLGMAC_CONFIG_SS GENMASK(30, 28) +#define XLGMAC_CONFIG_SS_SHIFT 28 +#define XLGMAC_CONFIG_SS_40G (0x0 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_25G (0x1 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_50G (0x2 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_100G (0x3 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_10G (0x4 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_2500 (0x6 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_CONFIG_SS_1000 (0x7 << XLGMAC_CONFIG_SS_SHIFT) +#define XLGMAC_RXQ_ENABLE_CTRL0 0x00000140 + +#endif /* __STMMAC_DWXLGMAC2_H__ */ diff --git a/devices/stmmac/enh_desc-6.1-ethercat.c b/devices/stmmac/enh_desc-6.1-ethercat.c new file mode 100644 index 00000000..216218b7 --- /dev/null +++ b/devices/stmmac/enh_desc-6.1-ethercat.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This contains the functions to handle the enhanced descriptors. 
+ + Copyright (C) 2007-2014 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "common-6.1-ethercat.h" +#include "descs_com-6.1-ethercat.h" + +static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = le32_to_cpu(p->des0); + int ret = tx_done; + + /* Get tx owner first */ + if (unlikely(tdes0 & ETDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. */ + if (likely(!(tdes0 & ETDES0_LAST_SEGMENT))) + return tx_not_ls; + + if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT)) + x->tx_jabber++; + + if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) { + x->tx_frame_flushed++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes0 & ETDES0_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes0 & ETDES0_LATE_COLLISION) || + (tdes0 & ETDES0_EXCESSIVE_COLLISIONS))) + stats->collisions += + (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3; + + if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL)) + x->tx_deferred++; + + if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) { + dwmac_dma_flush_tx_fifo(ioaddr); + x->tx_underflow++; + } + + if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR)) + x->tx_ip_header_error++; + + if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) { + x->tx_payload_error++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + ret = tx_err; + } + + if (unlikely(tdes0 & ETDES0_DEFERRED)) + x->tx_deferred++; + +#ifdef STMMAC_VLAN_TAG_USED + if (tdes0 & ETDES0_VLAN_FRAME) + x->tx_vlan++; +#endif + + return ret; +} + +static int enh_desc_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK); +} + +static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) +{ + int ret = good_frame; + u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; + + /* bits 5 7 0 | Frame status + * ---------------------------------------------------------- + * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects) + * 1 0 0 | IPv4/6 No CSUM errorS. + * 1 0 1 | IPv4/6 CSUM PAYLOAD error + * 1 1 0 | IPv4/6 CSUM IP HR error + * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS + * 0 0 1 | IPv4/6 unsupported IP PAYLOAD + * 0 1 1 | COE bypassed.. no IPv4/6 frame + * 0 1 0 | Reserved. 
+ */ + if (status == 0x0) + ret = llc_snap; + else if (status == 0x4) + ret = good_frame; + else if (status == 0x5) + ret = csum_none; + else if (status == 0x6) + ret = csum_none; + else if (status == 0x7) + ret = csum_none; + else if (status == 0x1) + ret = discard_frame; + else if (status == 0x3) + ret = discard_frame; + return ret; +} + +static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p) +{ + unsigned int rdes0 = le32_to_cpu(p->basic.des0); + unsigned int rdes4 = le32_to_cpu(p->des4); + + if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) { + int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes4 & ERDES4_IP_HDR_ERR) + x->ip_hdr_err++; + if (rdes4 & ERDES4_IP_PAYLOAD_ERR) + x->ip_payload_err++; + if (rdes4 & ERDES4_IP_CSUM_BYPASSED) + x->ip_csum_bypassed++; + if (rdes4 & ERDES4_IPV4_PKT_RCVD) + x->ipv4_pkt_rcvd++; + if (rdes4 & ERDES4_IPV6_PKT_RCVD) + x->ipv6_pkt_rcvd++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; + else if (message_type == RDES_EXT_FOLLOW_UP) + x->ptp_rx_msg_type_follow_up++; + else if (message_type == RDES_EXT_DELAY_REQ) + x->ptp_rx_msg_type_delay_req++; + else if (message_type == RDES_EXT_DELAY_RESP) + x->ptp_rx_msg_type_delay_resp++; + else if (message_type == RDES_EXT_PDELAY_REQ) + x->ptp_rx_msg_type_pdelay_req++; + else if (message_type == RDES_EXT_PDELAY_RESP) + x->ptp_rx_msg_type_pdelay_resp++; + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++; + + if (rdes4 & ERDES4_PTP_FRAME_TYPE) + x->ptp_frame_type++; + if (rdes4 & ERDES4_PTP_VER) + x->ptp_ver++; + if (rdes4 & ERDES4_TIMESTAMP_DROPPED) + x->timestamp_dropped++; + if (rdes4 & ERDES4_AV_PKT_RCVD) + x->av_pkt_rcvd++; + if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD) + x->av_tagged_pkt_rcvd++; + if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18) + x->vlan_tag_priority_val++; + if (rdes4 & ERDES4_L3_FILTER_MATCH) + x->l3_filter_match++; + if (rdes4 & ERDES4_L4_FILTER_MATCH) + x->l4_filter_match++; + if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26) + x->l3_l4_filter_no_match++; + } +} + +static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes0 = le32_to_cpu(p->des0); + int ret = good_frame; + + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; + + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { + x->rx_desc++; + stats->rx_length_errors++; + } + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) + x->rx_gmac_overflow++; + + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) + pr_err("\tIPC Csum Error/Giant frame\n"); + + if (unlikely(rdes0 & RDES0_COLLISION)) + stats->collisions++; + if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG)) + x->rx_watchdog++; + + if (unlikely(rdes0 & RDES0_MII_ERROR)) /* GMII */ + x->rx_mii++; + + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { + x->rx_crc_errors++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + + /* After a payload csum error, the ES bit is set. 
+ * It doesn't match with the information reported into the databook. + * At any rate, we need to understand if the CSUM hw computation is ok + * and report this info to the upper layers. */ + if (likely(ret == good_frame)) + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR)); + + if (unlikely(rdes0 & RDES0_DRIBBLING)) + x->dribbling_bit++; + + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) { + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) { + x->da_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { + x->rx_length++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (rdes0 & RDES0_VLAN_TAG) + x->rx_vlan++; +#endif + + return ret; +} + +static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end, int bfsize) +{ + int bfsize1; + + p->des0 |= cpu_to_le32(RDES0_OWN); + + bfsize1 = min(bfsize, BUF_SIZE_8KiB); + p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); + + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_rx_set_on_chain(p); + else + ehn_desc_rx_set_on_ring(p, end, bfsize); + + if (disable_rx_ic) + p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); +} + +static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 &= cpu_to_le32(~ETDES0_OWN); + if (mode == STMMAC_CHAIN_MODE) + enh_desc_end_tx_desc_on_chain(p); + else + enh_desc_end_tx_desc_on_ring(p, end); +} + +static int enh_desc_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31; +} + +static void enh_desc_set_tx_owner(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_OWN); +} + +static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des0 |= cpu_to_le32(RDES0_OWN); +} + +static int enh_desc_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29; +} + +static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) +{ + int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) + enh_desc_end_tx_desc_on_chain(p); + else + enh_desc_end_tx_desc_on_ring(p, ter); +} + +static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes0 = le32_to_cpu(p->des0); + + if (mode == STMMAC_CHAIN_MODE) + enh_set_tx_desc_len_on_chain(p, len); + else + enh_set_tx_desc_len_on_ring(p, len); + + if (is_fs) + tdes0 |= ETDES0_FIRST_SEGMENT; + else + tdes0 &= ~ETDES0_FIRST_SEGMENT; + + if (likely(csum_flag)) + tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + else + tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + + if (ls) + tdes0 |= ETDES0_LAST_SEGMENT; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes0 |= ETDES0_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. 
+ */ + dma_wmb(); + + p->des0 = cpu_to_le32(tdes0); +} + +static void enh_desc_set_tx_ic(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_INTERRUPT); +} + +static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) +{ + unsigned int csum = 0; + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines. + */ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + csum = 2; + + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK) + >> RDES0_FRAME_LEN_SHIFT) - csum); +} + +static void enh_desc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE); +} + +static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17; +} + +static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + u64 ns; + + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + ns = le32_to_cpu(p->des6); + /* convert high/sec time stamp value to nanosecond */ + ns += le32_to_cpu(p->des7) * 1000000000ULL; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + ns = le32_to_cpu(p->des2); + ns += le32_to_cpu(p->des3) * 1000000000ULL; + } + + *ts = ns; +} + +static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) +{ + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + if ((le32_to_cpu(p->des2) == 0xffffffff) && + (le32_to_cpu(p->des3) == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; + } +} + +static void enh_desc_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) +{ + struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + dma_addr_t dma_addr; + int i; + + pr_info("Extended %s descriptor ring:\n", rx ? 
"RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + dma_addr = dma_rx_phy + i * sizeof(*ep); + + x = *(u64 *)ep; + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + (unsigned int)x, (unsigned int)(x >> 32), + ep->basic.des2, ep->basic.des3); + ep++; + } + pr_info("\n"); +} + +static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + +static void enh_desc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + +const struct stmmac_desc_ops enh_desc_ops = { + .tx_status = enh_desc_get_tx_status, + .rx_status = enh_desc_get_rx_status, + .get_tx_len = enh_desc_get_tx_len, + .init_rx_desc = enh_desc_init_rx_desc, + .init_tx_desc = enh_desc_init_tx_desc, + .get_tx_owner = enh_desc_get_tx_owner, + .release_tx_desc = enh_desc_release_tx_desc, + .prepare_tx_desc = enh_desc_prepare_tx_desc, + .set_tx_ic = enh_desc_set_tx_ic, + .get_tx_ls = enh_desc_get_tx_ls, + .set_tx_owner = enh_desc_set_tx_owner, + .set_rx_owner = enh_desc_set_rx_owner, + .get_rx_frame_len = enh_desc_get_rx_frame_len, + .rx_extended_status = enh_desc_get_ext_status, + .enable_tx_timestamp = enh_desc_enable_tx_timestamp, + .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, + .get_timestamp = enh_desc_get_timestamp, + .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, + .display_ring = enh_desc_display_ring, + .set_addr = enh_desc_set_addr, + .clear = enh_desc_clear, +}; diff --git a/devices/stmmac/enh_desc-6.1-orig.c b/devices/stmmac/enh_desc-6.1-orig.c new file mode 100644 index 00000000..1bcbbd72 --- /dev/null +++ b/devices/stmmac/enh_desc-6.1-orig.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This contains the functions to handle the enhanced descriptors. + + Copyright (C) 2007-2014 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "common.h" +#include "descs_com.h" + +static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = le32_to_cpu(p->des0); + int ret = tx_done; + + /* Get tx owner first */ + if (unlikely(tdes0 & ETDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. 
*/ + if (likely(!(tdes0 & ETDES0_LAST_SEGMENT))) + return tx_not_ls; + + if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT)) + x->tx_jabber++; + + if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) { + x->tx_frame_flushed++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes0 & ETDES0_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes0 & ETDES0_LATE_COLLISION) || + (tdes0 & ETDES0_EXCESSIVE_COLLISIONS))) + stats->collisions += + (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3; + + if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL)) + x->tx_deferred++; + + if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) { + dwmac_dma_flush_tx_fifo(ioaddr); + x->tx_underflow++; + } + + if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR)) + x->tx_ip_header_error++; + + if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) { + x->tx_payload_error++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + ret = tx_err; + } + + if (unlikely(tdes0 & ETDES0_DEFERRED)) + x->tx_deferred++; + +#ifdef STMMAC_VLAN_TAG_USED + if (tdes0 & ETDES0_VLAN_FRAME) + x->tx_vlan++; +#endif + + return ret; +} + +static int enh_desc_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK); +} + +static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) +{ + int ret = good_frame; + u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; + + /* bits 5 7 0 | Frame status + * ---------------------------------------------------------- + * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects) + * 1 0 0 | IPv4/6 No CSUM errorS. + * 1 0 1 | IPv4/6 CSUM PAYLOAD error + * 1 1 0 | IPv4/6 CSUM IP HR error + * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS + * 0 0 1 | IPv4/6 unsupported IP PAYLOAD + * 0 1 1 | COE bypassed.. no IPv4/6 frame + * 0 1 0 | Reserved. 
+ */ + if (status == 0x0) + ret = llc_snap; + else if (status == 0x4) + ret = good_frame; + else if (status == 0x5) + ret = csum_none; + else if (status == 0x6) + ret = csum_none; + else if (status == 0x7) + ret = csum_none; + else if (status == 0x1) + ret = discard_frame; + else if (status == 0x3) + ret = discard_frame; + return ret; +} + +static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p) +{ + unsigned int rdes0 = le32_to_cpu(p->basic.des0); + unsigned int rdes4 = le32_to_cpu(p->des4); + + if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) { + int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes4 & ERDES4_IP_HDR_ERR) + x->ip_hdr_err++; + if (rdes4 & ERDES4_IP_PAYLOAD_ERR) + x->ip_payload_err++; + if (rdes4 & ERDES4_IP_CSUM_BYPASSED) + x->ip_csum_bypassed++; + if (rdes4 & ERDES4_IPV4_PKT_RCVD) + x->ipv4_pkt_rcvd++; + if (rdes4 & ERDES4_IPV6_PKT_RCVD) + x->ipv6_pkt_rcvd++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; + else if (message_type == RDES_EXT_FOLLOW_UP) + x->ptp_rx_msg_type_follow_up++; + else if (message_type == RDES_EXT_DELAY_REQ) + x->ptp_rx_msg_type_delay_req++; + else if (message_type == RDES_EXT_DELAY_RESP) + x->ptp_rx_msg_type_delay_resp++; + else if (message_type == RDES_EXT_PDELAY_REQ) + x->ptp_rx_msg_type_pdelay_req++; + else if (message_type == RDES_EXT_PDELAY_RESP) + x->ptp_rx_msg_type_pdelay_resp++; + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++; + + if (rdes4 & ERDES4_PTP_FRAME_TYPE) + x->ptp_frame_type++; + if (rdes4 & ERDES4_PTP_VER) + x->ptp_ver++; + if (rdes4 & ERDES4_TIMESTAMP_DROPPED) + x->timestamp_dropped++; + if (rdes4 & ERDES4_AV_PKT_RCVD) + x->av_pkt_rcvd++; + if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD) + x->av_tagged_pkt_rcvd++; + if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18) + x->vlan_tag_priority_val++; + if (rdes4 & ERDES4_L3_FILTER_MATCH) + x->l3_filter_match++; + if (rdes4 & ERDES4_L4_FILTER_MATCH) + x->l4_filter_match++; + if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26) + x->l3_l4_filter_no_match++; + } +} + +static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes0 = le32_to_cpu(p->des0); + int ret = good_frame; + + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; + + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { + x->rx_desc++; + stats->rx_length_errors++; + } + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) + x->rx_gmac_overflow++; + + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) + pr_err("\tIPC Csum Error/Giant frame\n"); + + if (unlikely(rdes0 & RDES0_COLLISION)) + stats->collisions++; + if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG)) + x->rx_watchdog++; + + if (unlikely(rdes0 & RDES0_MII_ERROR)) /* GMII */ + x->rx_mii++; + + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { + x->rx_crc_errors++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + + /* After a payload csum error, the ES bit is set. 
+ * It doesn't match with the information reported into the databook. + * At any rate, we need to understand if the CSUM hw computation is ok + * and report this info to the upper layers. */ + if (likely(ret == good_frame)) + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR)); + + if (unlikely(rdes0 & RDES0_DRIBBLING)) + x->dribbling_bit++; + + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) { + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) { + x->da_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { + x->rx_length++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (rdes0 & RDES0_VLAN_TAG) + x->rx_vlan++; +#endif + + return ret; +} + +static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end, int bfsize) +{ + int bfsize1; + + p->des0 |= cpu_to_le32(RDES0_OWN); + + bfsize1 = min(bfsize, BUF_SIZE_8KiB); + p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); + + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_rx_set_on_chain(p); + else + ehn_desc_rx_set_on_ring(p, end, bfsize); + + if (disable_rx_ic) + p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); +} + +static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 &= cpu_to_le32(~ETDES0_OWN); + if (mode == STMMAC_CHAIN_MODE) + enh_desc_end_tx_desc_on_chain(p); + else + enh_desc_end_tx_desc_on_ring(p, end); +} + +static int enh_desc_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31; +} + +static void enh_desc_set_tx_owner(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_OWN); +} + +static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des0 |= cpu_to_le32(RDES0_OWN); +} + +static int enh_desc_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29; +} + +static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) +{ + int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) + enh_desc_end_tx_desc_on_chain(p); + else + enh_desc_end_tx_desc_on_ring(p, ter); +} + +static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes0 = le32_to_cpu(p->des0); + + if (mode == STMMAC_CHAIN_MODE) + enh_set_tx_desc_len_on_chain(p, len); + else + enh_set_tx_desc_len_on_ring(p, len); + + if (is_fs) + tdes0 |= ETDES0_FIRST_SEGMENT; + else + tdes0 &= ~ETDES0_FIRST_SEGMENT; + + if (likely(csum_flag)) + tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + else + tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + + if (ls) + tdes0 |= ETDES0_LAST_SEGMENT; + + /* Finally set the OWN bit. Later the DMA will start! */ + if (tx_own) + tdes0 |= ETDES0_OWN; + + if (is_fs && tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. 
+ */ + dma_wmb(); + + p->des0 = cpu_to_le32(tdes0); +} + +static void enh_desc_set_tx_ic(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_INTERRUPT); +} + +static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) +{ + unsigned int csum = 0; + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines. + */ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + csum = 2; + + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK) + >> RDES0_FRAME_LEN_SHIFT) - csum); +} + +static void enh_desc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE); +} + +static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17; +} + +static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + u64 ns; + + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + ns = le32_to_cpu(p->des6); + /* convert high/sec time stamp value to nanosecond */ + ns += le32_to_cpu(p->des7) * 1000000000ULL; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + ns = le32_to_cpu(p->des2); + ns += le32_to_cpu(p->des3) * 1000000000ULL; + } + + *ts = ns; +} + +static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) +{ + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + if ((le32_to_cpu(p->des2) == 0xffffffff) && + (le32_to_cpu(p->des3) == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; + } +} + +static void enh_desc_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) +{ + struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + dma_addr_t dma_addr; + int i; + + pr_info("Extended %s descriptor ring:\n", rx ? 
"RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + dma_addr = dma_rx_phy + i * sizeof(*ep); + + x = *(u64 *)ep; + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + (unsigned int)x, (unsigned int)(x >> 32), + ep->basic.des2, ep->basic.des3); + ep++; + } + pr_info("\n"); +} + +static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + +static void enh_desc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + +const struct stmmac_desc_ops enh_desc_ops = { + .tx_status = enh_desc_get_tx_status, + .rx_status = enh_desc_get_rx_status, + .get_tx_len = enh_desc_get_tx_len, + .init_rx_desc = enh_desc_init_rx_desc, + .init_tx_desc = enh_desc_init_tx_desc, + .get_tx_owner = enh_desc_get_tx_owner, + .release_tx_desc = enh_desc_release_tx_desc, + .prepare_tx_desc = enh_desc_prepare_tx_desc, + .set_tx_ic = enh_desc_set_tx_ic, + .get_tx_ls = enh_desc_get_tx_ls, + .set_tx_owner = enh_desc_set_tx_owner, + .set_rx_owner = enh_desc_set_rx_owner, + .get_rx_frame_len = enh_desc_get_rx_frame_len, + .rx_extended_status = enh_desc_get_ext_status, + .enable_tx_timestamp = enh_desc_enable_tx_timestamp, + .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, + .get_timestamp = enh_desc_get_timestamp, + .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, + .display_ring = enh_desc_display_ring, + .set_addr = enh_desc_set_addr, + .clear = enh_desc_clear, +}; diff --git a/devices/stmmac/hwif-6.1-ethercat.c b/devices/stmmac/hwif-6.1-ethercat.c new file mode 100644 index 00000000..9406a4ff --- /dev/null +++ b/devices/stmmac/hwif-6.1-ethercat.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac HW Interface Handling + */ + +#include "common-6.1-ethercat.h" +#include "stmmac-6.1-ethercat.h" +#include "stmmac_ptp-6.1-ethercat.h" + +static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) +{ + u32 reg = readl(priv->ioaddr + id_reg); + + if (!reg) { + dev_info(priv->device, "Version ID not available\n"); + return 0x0; + } + + dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", + (unsigned int)(reg & GENMASK(15, 8)) >> 8, + (unsigned int)(reg & GENMASK(7, 0))); + return reg & GENMASK(7, 0); +} + +static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg) +{ + u32 reg = readl(priv->ioaddr + id_reg); + + if (!reg) { + dev_info(priv->device, "Version ID not available\n"); + return 0x0; + } + + return (reg & GENMASK(15, 8)) >> 8; +} + +static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->chain_mode) { + dev_info(priv->device, "Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + mac->mode = &chain_mode_ops; + } else { + dev_info(priv->device, "Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + mac->mode = &ring_mode_ops; + } +} + +static int stmmac_dwmac1_quirks(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->plat->enh_desc) { + dev_info(priv->device, "Enhanced/Alternate descriptors\n"); + + /* GMAC older than 3.50 has no extended descriptors */ + if (priv->synopsys_id >= DWMAC_CORE_3_50) { + dev_info(priv->device, "Enabled extended descriptors\n"); + priv->extend_desc = 1; + } else { + dev_warn(priv->device, "Extended descriptors not supported\n"); + } + + mac->desc = &enh_desc_ops; + } else { + dev_info(priv->device, "Normal descriptors\n"); + mac->desc = &ndesc_ops; + } + + stmmac_dwmac_mode_quirk(priv); + return 0; +} + 
+static int stmmac_dwmac4_quirks(struct stmmac_priv *priv) +{ + stmmac_dwmac_mode_quirk(priv); + return 0; +} + +static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv) +{ + priv->hw->xlgmac = true; + return 0; +} + +static const struct stmmac_hwif_entry { + bool gmac; + bool gmac4; + bool xgmac; + u32 min_id; + u32 dev_id; + const struct stmmac_regs_off regs; + const void *desc; + const void *dma; + const void *mac; + const void *hwtimestamp; + const void *mode; + const void *tc; + const void *mmc; + int (*setup)(struct stmmac_priv *priv); + int (*quirks)(struct stmmac_priv *priv); +} stmmac_hw[] = { + /* NOTE: New HW versions shall go to the end of this table */ + { + .gmac = false, + .gmac4 = false, + .xgmac = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, + .desc = NULL, + .dma = &dwmac100_dma_ops, + .mac = &dwmac100_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, + .mmc = &dwmac_mmc_ops, + .setup = dwmac100_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = true, + .gmac4 = false, + .xgmac = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, + .desc = NULL, + .dma = &dwmac1000_dma_ops, + .mac = &dwmac1000_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, + .mmc = &dwmac_mmc_ops, + .setup = dwmac1000_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac4_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = stmmac_dwmac4_quirks, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = DWMAC_CORE_4_00, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = DWMAC_CORE_4_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac510_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = false, + .xgmac = true, + .min_id = DWXGMAC_CORE_2_10, + .dev_id = DWXGMAC_ID, + .regs = { + .ptp_off = PTP_XGMAC_OFFSET, + .mmc_off = MMC_XGMAC_OFFSET, + }, + .desc = &dwxgmac210_desc_ops, + .dma = &dwxgmac210_dma_ops, + .mac = &dwxgmac210_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = &dwmac510_tc_ops, + .mmc = &dwxgmac_mmc_ops, + .setup = dwxgmac2_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = false, + .xgmac = true, + .min_id = DWXLGMAC_CORE_2_00, + .dev_id = 
DWXLGMAC_ID, + .regs = { + .ptp_off = PTP_XGMAC_OFFSET, + .mmc_off = MMC_XGMAC_OFFSET, + }, + .desc = &dwxgmac210_desc_ops, + .dma = &dwxgmac210_dma_ops, + .mac = &dwxlgmac2_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = &dwmac510_tc_ops, + .mmc = &dwxgmac_mmc_ops, + .setup = dwxlgmac2_setup, + .quirks = stmmac_dwxlgmac_quirks, + }, +}; + +int stmmac_hwif_init(struct stmmac_priv *priv) +{ + bool needs_xgmac = priv->plat->has_xgmac; + bool needs_gmac4 = priv->plat->has_gmac4; + bool needs_gmac = priv->plat->has_gmac; + const struct stmmac_hwif_entry *entry; + struct mac_device_info *mac; + bool needs_setup = true; + u32 id, dev_id = 0; + int i, ret; + + if (needs_gmac) { + id = stmmac_get_id(priv, GMAC_VERSION); + } else if (needs_gmac4 || needs_xgmac) { + id = stmmac_get_id(priv, GMAC4_VERSION); + if (needs_xgmac) + dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION); + } else { + id = 0; + } + + /* Save ID for later use */ + priv->synopsys_id = id; + + /* Lets assume some safe values first */ + priv->ptpaddr = priv->ioaddr + + (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET); + priv->mmcaddr = priv->ioaddr + + (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET); + + /* Check for HW specific setup first */ + if (priv->plat->setup) { + mac = priv->plat->setup(priv); + needs_setup = false; + } else { + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + } + + if (!mac) + return -ENOMEM; + + /* Fallback to generic HW */ + for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { + entry = &stmmac_hw[i]; + + if (needs_gmac ^ entry->gmac) + continue; + if (needs_gmac4 ^ entry->gmac4) + continue; + if (needs_xgmac ^ entry->xgmac) + continue; + /* Use synopsys_id var because some setups can override this */ + if (priv->synopsys_id < entry->min_id) + continue; + if (needs_xgmac && (dev_id ^ entry->dev_id)) + continue; + + /* Only use generic HW helpers if needed */ + mac->desc = mac->desc ? : entry->desc; + mac->dma = mac->dma ? : entry->dma; + mac->mac = mac->mac ? : entry->mac; + mac->ptp = mac->ptp ? : entry->hwtimestamp; + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? : entry->tc; + mac->mmc = mac->mmc ? : entry->mmc; + + priv->hw = mac; + priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; + priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; + + /* Entry found */ + if (needs_setup) { + ret = entry->setup(priv); + if (ret) + return ret; + } + + /* Save quirks, if needed for posterior use */ + priv->hwif_quirks = entry->quirks; + return 0; + } + + dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", + id, needs_gmac, needs_gmac4); + return -EINVAL; +} diff --git a/devices/stmmac/hwif-6.1-ethercat.h b/devices/stmmac/hwif-6.1-ethercat.h new file mode 100644 index 00000000..b2b9cf04 --- /dev/null +++ b/devices/stmmac/hwif-6.1-ethercat.h @@ -0,0 +1,642 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. +// stmmac HW Interface Callbacks + +#ifndef __STMMAC_HWIF_H__ +#define __STMMAC_HWIF_H__ + +#include +#include + +#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \ + (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result = 0; \ + } \ + __result; \ +}) +#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) 
\ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \ + __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result; \ +}) + +struct stmmac_extra_stats; +struct stmmac_safety_stats; +struct dma_desc; +struct dma_extended_desc; +struct dma_edesc; + +/* Descriptors helpers */ +struct stmmac_desc_ops { + /* DMA RX descriptor ring initialization */ + void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, + int end, int bfsize); + /* DMA TX descriptor ring initialization */ + void (*init_tx_desc)(struct dma_desc *p, int mode, int end); + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, bool ls, + unsigned int tot_pkt_len); + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, + int len2, bool tx_own, bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen); + /* Set/get the owner of the descriptor */ + void (*set_tx_owner)(struct dma_desc *p); + int (*get_tx_owner)(struct dma_desc *p); + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc)(struct dma_desc *p, int mode); + /* Clear interrupt on tx frame completion. When this bit is + * set an interrupt happens as soon as the frame is transmitted */ + void (*set_tx_ic)(struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls)(struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr); + /* Get the buffer size from the descriptor */ + int (*get_tx_len)(struct dma_desc *p); + /* Handle extra events on specific interrupts hw dependent */ + void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic); + /* Get the receive frame size */ + int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type); + /* Return the reception status looking at the RDES1 */ + int (*rx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p); + void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p); + /* Set tx timestamp enable bit */ + void (*enable_tx_timestamp) (struct dma_desc *p); + /* get tx timestamp status */ + int (*get_tx_timestamp_status) (struct dma_desc *p); + /* get timestamp value */ + void (*get_timestamp)(void *desc, u32 ats, u64 *ts); + /* get rx timestamp status */ + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); + /* Display ring */ + void (*display_ring)(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size); + /* set MSS via context descriptor */ + void (*set_mss)(struct dma_desc *p, unsigned int mss); + /* set descriptor skbuff address */ + void (*set_addr)(struct dma_desc *p, dma_addr_t addr); + /* clear descriptor */ + void (*clear)(struct dma_desc *p); + /* RSS */ + int (*get_rx_hash)(struct dma_desc *p, u32 *hash, + enum pkt_hash_types *type); + void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len); + void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr, bool buf2_valid); + void (*set_sarc)(struct dma_desc *p, u32 sarc_type); + void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type); + void (*set_vlan)(struct dma_desc *p, u32 type); + void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec); +}; + +#define stmmac_init_rx_desc(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, init_rx_desc, __args) +#define stmmac_init_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, init_tx_desc, __args) +#define stmmac_prepare_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args) +#define stmmac_prepare_tso_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args) +#define stmmac_set_tx_owner(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_tx_owner, __args) +#define stmmac_get_tx_owner(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_owner, __args) +#define stmmac_release_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, release_tx_desc, __args) +#define stmmac_set_tx_ic(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_tx_ic, __args) +#define stmmac_get_tx_ls(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_ls, __args) +#define stmmac_tx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, tx_status, __args) +#define stmmac_get_tx_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_len, __args) +#define stmmac_set_rx_owner(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_rx_owner, __args) +#define stmmac_get_rx_frame_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_frame_len, __args) +#define stmmac_rx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, rx_status, __args) +#define stmmac_rx_extended_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, rx_extended_status, __args) +#define stmmac_enable_tx_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args) +#define stmmac_get_tx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args) +#define stmmac_get_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_timestamp, __args) +#define stmmac_get_rx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args) +#define stmmac_display_ring(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, display_ring, __args) +#define stmmac_set_mss(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_mss, __args) +#define stmmac_set_desc_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_addr, __args) +#define stmmac_clear_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, clear, __args) +#define stmmac_get_rx_hash(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_hash, __args) +#define stmmac_get_rx_header_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_rx_header_len, __args) +#define stmmac_set_desc_sec_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_sec_addr, __args) +#define stmmac_set_desc_sarc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_sarc, __args) +#define stmmac_set_desc_vlan_tag(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args) +#define stmmac_set_desc_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_vlan, __args) +#define stmmac_set_desc_tbs(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, set_tbs, __args) + +struct stmmac_dma_cfg; +struct dma_features; + +/* Specific DMA helpers */ +struct stmmac_dma_ops { + /* DMA core initialization */ + int (*reset)(void __iomem *ioaddr); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, + int atds); + void (*init_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan); + void (*init_rx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan); + void (*init_tx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); + /* Dump DMA registers */ + void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr); + void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan, + bool rx, bool tx); + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan, + bool rx, bool tx); + void (*start_tx)(void __iomem *ioaddr, u32 chan); + void (*stop_tx)(void __iomem *ioaddr, u32 chan); + void (*start_rx)(void __iomem *ioaddr, u32 chan); + void (*stop_rx)(void __iomem *ioaddr, u32 chan); + int (*dma_interrupt) (void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, u32 dir); + /* If supported then get the optional core features */ + int (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features *dma_cap); + /* Program the HW RX Watchdog */ + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue); + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); + void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); + void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan); + int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan); +}; + +#define stmmac_reset(__priv, __args...) \ + stmmac_do_callback(__priv, dma, reset, __args) +#define stmmac_dma_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init, __args) +#define stmmac_init_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_chan, __args) +#define stmmac_init_rx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_rx_chan, __args) +#define stmmac_init_tx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_tx_chan, __args) +#define stmmac_axi(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, axi, __args) +#define stmmac_dump_dma_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dump_regs, __args) +#define stmmac_dma_rx_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args) +#define stmmac_dma_tx_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args) +#define stmmac_dma_diagnostic_fr(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args) +#define stmmac_enable_dma_transmission(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args) +#define stmmac_enable_dma_irq(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args) +#define stmmac_disable_dma_irq(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args) +#define stmmac_start_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_tx, __args) +#define stmmac_stop_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_tx, __args) +#define stmmac_start_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_rx, __args) +#define stmmac_stop_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_rx, __args) +#define stmmac_dma_interrupt_status(__priv, __args...) \ + stmmac_do_callback(__priv, dma, dma_interrupt, __args) +#define stmmac_get_hw_feature(__priv, __args...) \ + stmmac_do_callback(__priv, dma, get_hw_feature, __args) +#define stmmac_rx_watchdog(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, rx_watchdog, __args) +#define stmmac_set_tx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args) +#define stmmac_set_rx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args) +#define stmmac_set_rx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args) +#define stmmac_set_tx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) +#define stmmac_enable_tso(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_dma_qmode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, qmode, __args) +#define stmmac_set_dma_bfsize(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_bfsize, __args) +#define stmmac_enable_sph(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_sph, __args) +#define stmmac_enable_tbs(__priv, __args...) 
\ + stmmac_do_callback(__priv, dma, enable_tbs, __args) + +struct mac_device_info; +struct net_device; +struct rgmii_adv; +struct stmmac_tc_entry; +struct stmmac_pps_cfg; +struct stmmac_rss; +struct stmmac_est; + +/* Helpers to program the MAC core */ +struct stmmac_ops { + /* MAC core initialization */ + void (*core_init)(struct mac_device_info *hw, struct net_device *dev); + /* Enable the MAC RX/TX */ + void (*set_mac)(void __iomem *ioaddr, bool enable); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); + /* RX Queues Priority */ + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* TX Queues Priority */ + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* RX Queues Routing */ + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, + u32 queue); + /* Program RX Algorithms */ + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); + /* Program TX Algorithms */ + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); + /* Set MTL TX queues weight */ + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, + u32 weight, u32 queue); + /* RX MTL queue to RX dma mapping */ + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); + /* Configure AV Algorithm */ + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, + u32 idle_slope, u32 high_credit, u32 low_credit, + u32 queue); + /* Dump MAC registers */ + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(struct mac_device_info *hw, + struct stmmac_extra_stats *x); + /* Handle MTL interrupts */ + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); + /* Multicast filter setting */ + void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); + /* Flow control setting */ + void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, u32 tx_cnt); + /* Set power management mode (e.g. 
magic frame) */ + void (*pmt)(struct mac_device_info *hw, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr)(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*set_eee_mode)(struct mac_device_info *hw, + bool en_tx_lpi_clockgating); + void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et); + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); + void (*set_eee_pls)(struct mac_device_info *hw, int link); + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues); + /* PCS calls */ + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback); + void (*pcs_rane)(void __iomem *ioaddr, bool restart); + void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); + /* Safety Features */ + int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_cfg); + int (*safety_feat_irq_status)(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); + int (*safety_feat_dump)(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc); + /* Flexible RX Parser */ + int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); + /* Flexible PPS */ + int (*flex_pps_config)(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); + /* Loopback for selftests */ + void (*set_mac_loopback)(void __iomem *ioaddr, bool enable); + /* RSS */ + int (*rss_configure)(struct mac_device_info *hw, + struct stmmac_rss *cfg, u32 num_rxq); + /* VLAN */ + void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, + __le16 perfect_match, bool is_double); + void (*enable_vlan)(struct mac_device_info *hw, u32 type); + int (*add_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid); + int (*del_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid); + void (*restore_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw); + /* TX Timestamp */ + int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); + /* Source Address Insertion / Replacement */ + void (*sarc_configure)(void __iomem *ioaddr, int val); + /* Filtering */ + int (*config_l3_filter)(struct mac_device_info *hw, u32 filter_no, + bool en, bool ipv6, bool sa, bool inv, + u32 match); + int (*config_l4_filter)(struct mac_device_info *hw, u32 filter_no, + bool en, bool udp, bool sa, bool inv, + u32 match); + void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); + int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate); + void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); + void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, + bool enable); + void (*fpe_send_mpacket)(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type); + int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); +}; + +#define stmmac_core_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, core_init, __args) +#define stmmac_mac_set(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mac, set_mac, __args) +#define stmmac_rx_ipc(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rx_ipc, __args) +#define stmmac_rx_queue_enable(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args) +#define stmmac_rx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args) +#define stmmac_tx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args) +#define stmmac_rx_queue_routing(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args) +#define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args) +#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args) +#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args) +#define stmmac_map_mtl_to_dma(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args) +#define stmmac_config_cbs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, config_cbs, __args) +#define stmmac_dump_mac_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, dump_regs, __args) +#define stmmac_host_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_irq_status, __args) +#define stmmac_host_mtl_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args) +#define stmmac_set_filter(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_filter, __args) +#define stmmac_flow_ctrl(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, flow_ctrl, __args) +#define stmmac_pmt(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pmt, __args) +#define stmmac_set_umac_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_umac_addr, __args) +#define stmmac_get_umac_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, get_umac_addr, __args) +#define stmmac_set_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_mode, __args) +#define stmmac_reset_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args) +#define stmmac_set_eee_lpi_timer(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args) +#define stmmac_set_eee_timer(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_timer, __args) +#define stmmac_set_eee_pls(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_pls, __args) +#define stmmac_mac_debug(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, debug, __args) +#define stmmac_pcs_ctrl_ane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) +#define stmmac_pcs_rane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_rane, __args) +#define stmmac_pcs_get_adv_lp(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) +#define stmmac_safety_feat_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_config, __args) +#define stmmac_safety_feat_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args) +#define stmmac_safety_feat_dump(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_dump, __args) +#define stmmac_rxp_config(__priv, __args...) 
\ + stmmac_do_callback(__priv, mac, rxp_config, __args) +#define stmmac_flex_pps_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, flex_pps_config, __args) +#define stmmac_set_mac_loopback(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args) +#define stmmac_rss_configure(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rss_configure, __args) +#define stmmac_update_vlan_hash(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args) +#define stmmac_enable_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, enable_vlan, __args) +#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args) +#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args) +#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args) +#define stmmac_get_mac_tx_timestamp(__priv, __args...) \ + stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args) +#define stmmac_sarc_configure(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, sarc_configure, __args) +#define stmmac_config_l3_filter(__priv, __args...) \ + stmmac_do_callback(__priv, mac, config_l3_filter, __args) +#define stmmac_config_l4_filter(__priv, __args...) \ + stmmac_do_callback(__priv, mac, config_l4_filter, __args) +#define stmmac_set_arp_offload(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) +#define stmmac_est_configure(__priv, __args...) \ + stmmac_do_callback(__priv, mac, est_configure, __args) +#define stmmac_est_irq_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, est_irq_status, __args) +#define stmmac_fpe_configure(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_configure, __args) +#define stmmac_fpe_send_mpacket(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args) +#define stmmac_fpe_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, fpe_irq_status, __args) + +struct stmmac_priv; + +/* PTP and HW Timer helpers */ +struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc); + int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend) (void __iomem *ioaddr, u32 addend); + int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime) (void __iomem *ioaddr, u64 *systime); + void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); + void (*timestamp_interrupt)(struct stmmac_priv *priv); +}; + +#define stmmac_config_hw_tstamping(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args) +#define stmmac_config_sub_second_increment(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args) +#define stmmac_init_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, init_systime, __args) +#define stmmac_config_addend(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, config_addend, __args) +#define stmmac_adjust_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, adjust_systime, __args) +#define stmmac_get_systime(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, get_systime, __args) +#define stmmac_get_ptptime(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) +#define stmmac_timestamp_interrupt(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args) + +/* Helpers to manage the descriptors for chain and ring modes */ +struct stmmac_mode_ops { + void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, + unsigned int extend_desc); + unsigned int (*is_jumbo_frm) (int len, int ehn_desc); + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); + int (*set_16kib_bfsize)(int mtu); + void (*init_desc3)(struct dma_desc *p); + void (*refill_desc3) (void *priv, struct dma_desc *p); + void (*clean_desc3) (void *priv, struct dma_desc *p); +}; + +#define stmmac_mode_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init, __args) +#define stmmac_is_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, is_jumbo_frm, __args) +#define stmmac_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, jumbo_frm, __args) +#define stmmac_set_16kib_bfsize(__priv, __args...) \ + stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args) +#define stmmac_init_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init_desc3, __args) +#define stmmac_refill_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, refill_desc3, __args) +#define stmmac_clean_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, clean_desc3, __args) + +struct tc_cls_u32_offload; +struct tc_cbs_qopt_offload; +struct flow_cls_offload; +struct tc_taprio_qopt_offload; +struct tc_etf_qopt_offload; + +struct stmmac_tc_ops { + int (*init)(struct stmmac_priv *priv); + int (*setup_cls_u32)(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls); + int (*setup_cbs)(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt); + int (*setup_cls)(struct stmmac_priv *priv, + struct flow_cls_offload *cls); + int (*setup_taprio)(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt); + int (*setup_etf)(struct stmmac_priv *priv, + struct tc_etf_qopt_offload *qopt); +}; + +#define stmmac_tc_init(__priv, __args...) \ + stmmac_do_callback(__priv, tc, init, __args) +#define stmmac_tc_setup_cls_u32(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cls_u32, __args) +#define stmmac_tc_setup_cbs(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cbs, __args) +#define stmmac_tc_setup_cls(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cls, __args) +#define stmmac_tc_setup_taprio(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_taprio, __args) +#define stmmac_tc_setup_etf(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_etf, __args) + +struct stmmac_counters; + +struct stmmac_mmc_ops { + void (*ctrl)(void __iomem *ioaddr, unsigned int mode); + void (*intr_all_mask)(void __iomem *ioaddr); + void (*read)(void __iomem *ioaddr, struct stmmac_counters *mmc); +}; + +#define stmmac_mmc_ctrl(__priv, __args...) \ + stmmac_do_void_callback(__priv, mmc, ctrl, __args) +#define stmmac_mmc_intr_all_mask(__priv, __args...) \ + stmmac_do_void_callback(__priv, mmc, intr_all_mask, __args) +#define stmmac_mmc_read(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mmc, read, __args) + +struct stmmac_regs_off { + u32 ptp_off; + u32 mmc_off; +}; + +extern const struct stmmac_ops dwmac100_ops; +extern const struct stmmac_dma_ops dwmac100_dma_ops; +extern const struct stmmac_ops dwmac1000_ops; +extern const struct stmmac_dma_ops dwmac1000_dma_ops; +extern const struct stmmac_ops dwmac4_ops; +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_ops dwmac410_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; +extern const struct stmmac_ops dwmac510_ops; +extern const struct stmmac_tc_ops dwmac510_tc_ops; +extern const struct stmmac_ops dwxgmac210_ops; +extern const struct stmmac_ops dwxlgmac2_ops; +extern const struct stmmac_dma_ops dwxgmac210_dma_ops; +extern const struct stmmac_desc_ops dwxgmac210_desc_ops; +extern const struct stmmac_mmc_ops dwmac_mmc_ops; +extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; + +#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ + +int stmmac_hwif_init(struct stmmac_priv *priv); + +#endif /* __STMMAC_HWIF_H__ */ diff --git a/devices/stmmac/hwif-6.1-orig.c b/devices/stmmac/hwif-6.1-orig.c new file mode 100644 index 00000000..bb7114f9 --- /dev/null +++ b/devices/stmmac/hwif-6.1-orig.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac HW Interface Handling + */ + +#include "common.h" +#include "stmmac.h" +#include "stmmac_ptp.h" + +static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) +{ + u32 reg = readl(priv->ioaddr + id_reg); + + if (!reg) { + dev_info(priv->device, "Version ID not available\n"); + return 0x0; + } + + dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", + (unsigned int)(reg & GENMASK(15, 8)) >> 8, + (unsigned int)(reg & GENMASK(7, 0))); + return reg & GENMASK(7, 0); +} + +static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg) +{ + u32 reg = readl(priv->ioaddr + id_reg); + + if (!reg) { + dev_info(priv->device, "Version ID not available\n"); + return 0x0; + } + + return (reg & GENMASK(15, 8)) >> 8; +} + +static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->chain_mode) { + dev_info(priv->device, "Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + mac->mode = &chain_mode_ops; + } else { + dev_info(priv->device, "Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + mac->mode = &ring_mode_ops; + } +} + +static int stmmac_dwmac1_quirks(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->plat->enh_desc) { + dev_info(priv->device, "Enhanced/Alternate descriptors\n"); + + /* GMAC older than 3.50 has no extended descriptors */ + if (priv->synopsys_id >= DWMAC_CORE_3_50) { + dev_info(priv->device, "Enabled extended descriptors\n"); + priv->extend_desc = 1; + } else { + dev_warn(priv->device, "Extended descriptors not supported\n"); + } + + mac->desc = &enh_desc_ops; + } else { + dev_info(priv->device, "Normal descriptors\n"); + mac->desc = &ndesc_ops; + } + + stmmac_dwmac_mode_quirk(priv); + return 0; +} + +static int stmmac_dwmac4_quirks(struct stmmac_priv *priv) +{ + stmmac_dwmac_mode_quirk(priv); + return 0; +} + +static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv) +{ + priv->hw->xlgmac = true; + return 0; +} + +static const struct stmmac_hwif_entry { + bool gmac; + bool gmac4; + bool xgmac; + u32 min_id; + u32 dev_id; + const struct 
stmmac_regs_off regs; + const void *desc; + const void *dma; + const void *mac; + const void *hwtimestamp; + const void *mode; + const void *tc; + const void *mmc; + int (*setup)(struct stmmac_priv *priv); + int (*quirks)(struct stmmac_priv *priv); +} stmmac_hw[] = { + /* NOTE: New HW versions shall go to the end of this table */ + { + .gmac = false, + .gmac4 = false, + .xgmac = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, + .desc = NULL, + .dma = &dwmac100_dma_ops, + .mac = &dwmac100_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, + .mmc = &dwmac_mmc_ops, + .setup = dwmac100_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = true, + .gmac4 = false, + .xgmac = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, + .desc = NULL, + .dma = &dwmac1000_dma_ops, + .mac = &dwmac1000_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, + .mmc = &dwmac_mmc_ops, + .setup = dwmac1000_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac4_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = stmmac_dwmac4_quirks, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = DWMAC_CORE_4_00, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = DWMAC_CORE_4_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .xgmac = false, + .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac510_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, + .mmc = &dwmac_mmc_ops, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = false, + .xgmac = true, + .min_id = DWXGMAC_CORE_2_10, + .dev_id = DWXGMAC_ID, + .regs = { + .ptp_off = PTP_XGMAC_OFFSET, + .mmc_off = MMC_XGMAC_OFFSET, + }, + .desc = &dwxgmac210_desc_ops, + .dma = &dwxgmac210_dma_ops, + .mac = &dwxgmac210_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = &dwmac510_tc_ops, + .mmc = &dwxgmac_mmc_ops, + .setup = dwxgmac2_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = false, + .xgmac = true, + .min_id = DWXLGMAC_CORE_2_00, + .dev_id = DWXLGMAC_ID, + .regs = { + .ptp_off = PTP_XGMAC_OFFSET, + .mmc_off = MMC_XGMAC_OFFSET, + }, + .desc = &dwxgmac210_desc_ops, + .dma = &dwxgmac210_dma_ops, + .mac = &dwxlgmac2_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = &dwmac510_tc_ops, + .mmc = &dwxgmac_mmc_ops, + .setup = dwxlgmac2_setup, + .quirks = stmmac_dwxlgmac_quirks, + }, +}; + 
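+/* How this table is consumed (a descriptive summary of the function below):
+ * stmmac_hwif_init() walks the entries from the last one backwards, so the
+ * newest core description wins whose feature flags (gmac/gmac4/xgmac) match
+ * the platform data, whose min_id does not exceed the detected Synopsys ID
+ * and -- for XGMAC parts -- whose dev_id matches.  Callback groups already
+ * provided by a platform-specific setup() hook are kept as-is; only the
+ * missing ones are filled in from the matching entry, together with the
+ * PTP and MMC register offsets.
+ */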
+int stmmac_hwif_init(struct stmmac_priv *priv) +{ + bool needs_xgmac = priv->plat->has_xgmac; + bool needs_gmac4 = priv->plat->has_gmac4; + bool needs_gmac = priv->plat->has_gmac; + const struct stmmac_hwif_entry *entry; + struct mac_device_info *mac; + bool needs_setup = true; + u32 id, dev_id = 0; + int i, ret; + + if (needs_gmac) { + id = stmmac_get_id(priv, GMAC_VERSION); + } else if (needs_gmac4 || needs_xgmac) { + id = stmmac_get_id(priv, GMAC4_VERSION); + if (needs_xgmac) + dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION); + } else { + id = 0; + } + + /* Save ID for later use */ + priv->synopsys_id = id; + + /* Lets assume some safe values first */ + priv->ptpaddr = priv->ioaddr + + (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET); + priv->mmcaddr = priv->ioaddr + + (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET); + + /* Check for HW specific setup first */ + if (priv->plat->setup) { + mac = priv->plat->setup(priv); + needs_setup = false; + } else { + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + } + + if (!mac) + return -ENOMEM; + + /* Fallback to generic HW */ + for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { + entry = &stmmac_hw[i]; + + if (needs_gmac ^ entry->gmac) + continue; + if (needs_gmac4 ^ entry->gmac4) + continue; + if (needs_xgmac ^ entry->xgmac) + continue; + /* Use synopsys_id var because some setups can override this */ + if (priv->synopsys_id < entry->min_id) + continue; + if (needs_xgmac && (dev_id ^ entry->dev_id)) + continue; + + /* Only use generic HW helpers if needed */ + mac->desc = mac->desc ? : entry->desc; + mac->dma = mac->dma ? : entry->dma; + mac->mac = mac->mac ? : entry->mac; + mac->ptp = mac->ptp ? : entry->hwtimestamp; + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? : entry->tc; + mac->mmc = mac->mmc ? : entry->mmc; + + priv->hw = mac; + priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; + priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; + + /* Entry found */ + if (needs_setup) { + ret = entry->setup(priv); + if (ret) + return ret; + } + + /* Save quirks, if needed for posterior use */ + priv->hwif_quirks = entry->quirks; + return 0; + } + + dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", + id, needs_gmac, needs_gmac4); + return -EINVAL; +} diff --git a/devices/stmmac/hwif-6.1-orig.h b/devices/stmmac/hwif-6.1-orig.h new file mode 100644 index 00000000..b2b9cf04 --- /dev/null +++ b/devices/stmmac/hwif-6.1-orig.h @@ -0,0 +1,642 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. +// stmmac HW Interface Callbacks + +#ifndef __STMMAC_HWIF_H__ +#define __STMMAC_HWIF_H__ + +#include +#include + +#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \ + (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result = 0; \ + } \ + __result; \ +}) +#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) 
\ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \ + __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result; \ +}) + +struct stmmac_extra_stats; +struct stmmac_safety_stats; +struct dma_desc; +struct dma_extended_desc; +struct dma_edesc; + +/* Descriptors helpers */ +struct stmmac_desc_ops { + /* DMA RX descriptor ring initialization */ + void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, + int end, int bfsize); + /* DMA TX descriptor ring initialization */ + void (*init_tx_desc)(struct dma_desc *p, int mode, int end); + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, bool ls, + unsigned int tot_pkt_len); + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, + int len2, bool tx_own, bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen); + /* Set/get the owner of the descriptor */ + void (*set_tx_owner)(struct dma_desc *p); + int (*get_tx_owner)(struct dma_desc *p); + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc)(struct dma_desc *p, int mode); + /* Clear interrupt on tx frame completion. When this bit is + * set an interrupt happens as soon as the frame is transmitted */ + void (*set_tx_ic)(struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls)(struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr); + /* Get the buffer size from the descriptor */ + int (*get_tx_len)(struct dma_desc *p); + /* Handle extra events on specific interrupts hw dependent */ + void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic); + /* Get the receive frame size */ + int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type); + /* Return the reception status looking at the RDES1 */ + int (*rx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p); + void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p); + /* Set tx timestamp enable bit */ + void (*enable_tx_timestamp) (struct dma_desc *p); + /* get tx timestamp status */ + int (*get_tx_timestamp_status) (struct dma_desc *p); + /* get timestamp value */ + void (*get_timestamp)(void *desc, u32 ats, u64 *ts); + /* get rx timestamp status */ + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); + /* Display ring */ + void (*display_ring)(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size); + /* set MSS via context descriptor */ + void (*set_mss)(struct dma_desc *p, unsigned int mss); + /* set descriptor skbuff address */ + void (*set_addr)(struct dma_desc *p, dma_addr_t addr); + /* clear descriptor */ + void (*clear)(struct dma_desc *p); + /* RSS */ + int (*get_rx_hash)(struct dma_desc *p, u32 *hash, + enum pkt_hash_types *type); + void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len); + void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr, bool buf2_valid); + void (*set_sarc)(struct dma_desc *p, u32 sarc_type); + void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag, + u32 inner_type); + void (*set_vlan)(struct dma_desc *p, u32 type); + void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec); +}; + +#define stmmac_init_rx_desc(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, init_rx_desc, __args) +#define stmmac_init_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, init_tx_desc, __args) +#define stmmac_prepare_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args) +#define stmmac_prepare_tso_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args) +#define stmmac_set_tx_owner(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_tx_owner, __args) +#define stmmac_get_tx_owner(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_owner, __args) +#define stmmac_release_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, release_tx_desc, __args) +#define stmmac_set_tx_ic(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_tx_ic, __args) +#define stmmac_get_tx_ls(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_ls, __args) +#define stmmac_tx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, tx_status, __args) +#define stmmac_get_tx_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_len, __args) +#define stmmac_set_rx_owner(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_rx_owner, __args) +#define stmmac_get_rx_frame_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_frame_len, __args) +#define stmmac_rx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, rx_status, __args) +#define stmmac_rx_extended_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, rx_extended_status, __args) +#define stmmac_enable_tx_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args) +#define stmmac_get_tx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args) +#define stmmac_get_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_timestamp, __args) +#define stmmac_get_rx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args) +#define stmmac_display_ring(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, display_ring, __args) +#define stmmac_set_mss(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_mss, __args) +#define stmmac_set_desc_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_addr, __args) +#define stmmac_clear_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, clear, __args) +#define stmmac_get_rx_hash(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_hash, __args) +#define stmmac_get_rx_header_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_rx_header_len, __args) +#define stmmac_set_desc_sec_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_sec_addr, __args) +#define stmmac_set_desc_sarc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_sarc, __args) +#define stmmac_set_desc_vlan_tag(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args) +#define stmmac_set_desc_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_vlan, __args) +#define stmmac_set_desc_tbs(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, set_tbs, __args) + +struct stmmac_dma_cfg; +struct dma_features; + +/* Specific DMA helpers */ +struct stmmac_dma_ops { + /* DMA core initialization */ + int (*reset)(void __iomem *ioaddr); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, + int atds); + void (*init_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan); + void (*init_rx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan); + void (*init_tx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + dma_addr_t phy, u32 chan); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); + /* Dump DMA registers */ + void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr); + void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan, + bool rx, bool tx); + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan, + bool rx, bool tx); + void (*start_tx)(void __iomem *ioaddr, u32 chan); + void (*stop_tx)(void __iomem *ioaddr, u32 chan); + void (*start_rx)(void __iomem *ioaddr, u32 chan); + void (*stop_rx)(void __iomem *ioaddr, u32 chan); + int (*dma_interrupt) (void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan, u32 dir); + /* If supported then get the optional core features */ + int (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features *dma_cap); + /* Program the HW RX Watchdog */ + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue); + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); + void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); + void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan); + int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan); +}; + +#define stmmac_reset(__priv, __args...) \ + stmmac_do_callback(__priv, dma, reset, __args) +#define stmmac_dma_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init, __args) +#define stmmac_init_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_chan, __args) +#define stmmac_init_rx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_rx_chan, __args) +#define stmmac_init_tx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_tx_chan, __args) +#define stmmac_axi(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, axi, __args) +#define stmmac_dump_dma_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dump_regs, __args) +#define stmmac_dma_rx_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args) +#define stmmac_dma_tx_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args) +#define stmmac_dma_diagnostic_fr(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args) +#define stmmac_enable_dma_transmission(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args) +#define stmmac_enable_dma_irq(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args) +#define stmmac_disable_dma_irq(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args) +#define stmmac_start_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_tx, __args) +#define stmmac_stop_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_tx, __args) +#define stmmac_start_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_rx, __args) +#define stmmac_stop_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_rx, __args) +#define stmmac_dma_interrupt_status(__priv, __args...) \ + stmmac_do_callback(__priv, dma, dma_interrupt, __args) +#define stmmac_get_hw_feature(__priv, __args...) \ + stmmac_do_callback(__priv, dma, get_hw_feature, __args) +#define stmmac_rx_watchdog(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, rx_watchdog, __args) +#define stmmac_set_tx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args) +#define stmmac_set_rx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args) +#define stmmac_set_rx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args) +#define stmmac_set_tx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) +#define stmmac_enable_tso(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_dma_qmode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, qmode, __args) +#define stmmac_set_dma_bfsize(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_bfsize, __args) +#define stmmac_enable_sph(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_sph, __args) +#define stmmac_enable_tbs(__priv, __args...) 
\ + stmmac_do_callback(__priv, dma, enable_tbs, __args) + +struct mac_device_info; +struct net_device; +struct rgmii_adv; +struct stmmac_tc_entry; +struct stmmac_pps_cfg; +struct stmmac_rss; +struct stmmac_est; + +/* Helpers to program the MAC core */ +struct stmmac_ops { + /* MAC core initialization */ + void (*core_init)(struct mac_device_info *hw, struct net_device *dev); + /* Enable the MAC RX/TX */ + void (*set_mac)(void __iomem *ioaddr, bool enable); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); + /* RX Queues Priority */ + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* TX Queues Priority */ + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* RX Queues Routing */ + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, + u32 queue); + /* Program RX Algorithms */ + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); + /* Program TX Algorithms */ + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); + /* Set MTL TX queues weight */ + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, + u32 weight, u32 queue); + /* RX MTL queue to RX dma mapping */ + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); + /* Configure AV Algorithm */ + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, + u32 idle_slope, u32 high_credit, u32 low_credit, + u32 queue); + /* Dump MAC registers */ + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(struct mac_device_info *hw, + struct stmmac_extra_stats *x); + /* Handle MTL interrupts */ + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); + /* Multicast filter setting */ + void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); + /* Flow control setting */ + void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time, u32 tx_cnt); + /* Set power management mode (e.g. 
magic frame) */ + void (*pmt)(struct mac_device_info *hw, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr)(struct mac_device_info *hw, + const unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*set_eee_mode)(struct mac_device_info *hw, + bool en_tx_lpi_clockgating); + void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et); + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); + void (*set_eee_pls)(struct mac_device_info *hw, int link); + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues); + /* PCS calls */ + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback); + void (*pcs_rane)(void __iomem *ioaddr, bool restart); + void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); + /* Safety Features */ + int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_feature_cfg *safety_cfg); + int (*safety_feat_irq_status)(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); + int (*safety_feat_dump)(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc); + /* Flexible RX Parser */ + int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); + /* Flexible PPS */ + int (*flex_pps_config)(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); + /* Loopback for selftests */ + void (*set_mac_loopback)(void __iomem *ioaddr, bool enable); + /* RSS */ + int (*rss_configure)(struct mac_device_info *hw, + struct stmmac_rss *cfg, u32 num_rxq); + /* VLAN */ + void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, + __le16 perfect_match, bool is_double); + void (*enable_vlan)(struct mac_device_info *hw, u32 type); + int (*add_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid); + int (*del_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid); + void (*restore_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw); + /* TX Timestamp */ + int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); + /* Source Address Insertion / Replacement */ + void (*sarc_configure)(void __iomem *ioaddr, int val); + /* Filtering */ + int (*config_l3_filter)(struct mac_device_info *hw, u32 filter_no, + bool en, bool ipv6, bool sa, bool inv, + u32 match); + int (*config_l4_filter)(struct mac_device_info *hw, u32 filter_no, + bool en, bool udp, bool sa, bool inv, + u32 match); + void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); + int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg, + unsigned int ptp_rate); + void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); + void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, + bool enable); + void (*fpe_send_mpacket)(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type); + int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); +}; + +#define stmmac_core_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, core_init, __args) +#define stmmac_mac_set(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mac, set_mac, __args) +#define stmmac_rx_ipc(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rx_ipc, __args) +#define stmmac_rx_queue_enable(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args) +#define stmmac_rx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args) +#define stmmac_tx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args) +#define stmmac_rx_queue_routing(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args) +#define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args) +#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args) +#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args) +#define stmmac_map_mtl_to_dma(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args) +#define stmmac_config_cbs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, config_cbs, __args) +#define stmmac_dump_mac_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, dump_regs, __args) +#define stmmac_host_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_irq_status, __args) +#define stmmac_host_mtl_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args) +#define stmmac_set_filter(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_filter, __args) +#define stmmac_flow_ctrl(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, flow_ctrl, __args) +#define stmmac_pmt(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pmt, __args) +#define stmmac_set_umac_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_umac_addr, __args) +#define stmmac_get_umac_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, get_umac_addr, __args) +#define stmmac_set_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_mode, __args) +#define stmmac_reset_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args) +#define stmmac_set_eee_lpi_timer(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args) +#define stmmac_set_eee_timer(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_timer, __args) +#define stmmac_set_eee_pls(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_pls, __args) +#define stmmac_mac_debug(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, debug, __args) +#define stmmac_pcs_ctrl_ane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) +#define stmmac_pcs_rane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_rane, __args) +#define stmmac_pcs_get_adv_lp(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) +#define stmmac_safety_feat_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_config, __args) +#define stmmac_safety_feat_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args) +#define stmmac_safety_feat_dump(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_dump, __args) +#define stmmac_rxp_config(__priv, __args...) 
\ + stmmac_do_callback(__priv, mac, rxp_config, __args) +#define stmmac_flex_pps_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, flex_pps_config, __args) +#define stmmac_set_mac_loopback(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args) +#define stmmac_rss_configure(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rss_configure, __args) +#define stmmac_update_vlan_hash(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args) +#define stmmac_enable_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, enable_vlan, __args) +#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args) +#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args) +#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args) +#define stmmac_get_mac_tx_timestamp(__priv, __args...) \ + stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args) +#define stmmac_sarc_configure(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, sarc_configure, __args) +#define stmmac_config_l3_filter(__priv, __args...) \ + stmmac_do_callback(__priv, mac, config_l3_filter, __args) +#define stmmac_config_l4_filter(__priv, __args...) \ + stmmac_do_callback(__priv, mac, config_l4_filter, __args) +#define stmmac_set_arp_offload(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) +#define stmmac_est_configure(__priv, __args...) \ + stmmac_do_callback(__priv, mac, est_configure, __args) +#define stmmac_est_irq_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, est_irq_status, __args) +#define stmmac_fpe_configure(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_configure, __args) +#define stmmac_fpe_send_mpacket(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args) +#define stmmac_fpe_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, fpe_irq_status, __args) + +struct stmmac_priv; + +/* PTP and HW Timer helpers */ +struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc); + int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend) (void __iomem *ioaddr, u32 addend); + int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime) (void __iomem *ioaddr, u64 *systime); + void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); + void (*timestamp_interrupt)(struct stmmac_priv *priv); +}; + +#define stmmac_config_hw_tstamping(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args) +#define stmmac_config_sub_second_increment(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args) +#define stmmac_init_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, init_systime, __args) +#define stmmac_config_addend(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, config_addend, __args) +#define stmmac_adjust_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, adjust_systime, __args) +#define stmmac_get_systime(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, get_systime, __args) +#define stmmac_get_ptptime(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) +#define stmmac_timestamp_interrupt(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args) + +/* Helpers to manage the descriptors for chain and ring modes */ +struct stmmac_mode_ops { + void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, + unsigned int extend_desc); + unsigned int (*is_jumbo_frm) (int len, int ehn_desc); + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); + int (*set_16kib_bfsize)(int mtu); + void (*init_desc3)(struct dma_desc *p); + void (*refill_desc3) (void *priv, struct dma_desc *p); + void (*clean_desc3) (void *priv, struct dma_desc *p); +}; + +#define stmmac_mode_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init, __args) +#define stmmac_is_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, is_jumbo_frm, __args) +#define stmmac_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, jumbo_frm, __args) +#define stmmac_set_16kib_bfsize(__priv, __args...) \ + stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args) +#define stmmac_init_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init_desc3, __args) +#define stmmac_refill_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, refill_desc3, __args) +#define stmmac_clean_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, clean_desc3, __args) + +struct tc_cls_u32_offload; +struct tc_cbs_qopt_offload; +struct flow_cls_offload; +struct tc_taprio_qopt_offload; +struct tc_etf_qopt_offload; + +struct stmmac_tc_ops { + int (*init)(struct stmmac_priv *priv); + int (*setup_cls_u32)(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls); + int (*setup_cbs)(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt); + int (*setup_cls)(struct stmmac_priv *priv, + struct flow_cls_offload *cls); + int (*setup_taprio)(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt); + int (*setup_etf)(struct stmmac_priv *priv, + struct tc_etf_qopt_offload *qopt); +}; + +#define stmmac_tc_init(__priv, __args...) \ + stmmac_do_callback(__priv, tc, init, __args) +#define stmmac_tc_setup_cls_u32(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cls_u32, __args) +#define stmmac_tc_setup_cbs(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cbs, __args) +#define stmmac_tc_setup_cls(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cls, __args) +#define stmmac_tc_setup_taprio(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_taprio, __args) +#define stmmac_tc_setup_etf(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_etf, __args) + +struct stmmac_counters; + +struct stmmac_mmc_ops { + void (*ctrl)(void __iomem *ioaddr, unsigned int mode); + void (*intr_all_mask)(void __iomem *ioaddr); + void (*read)(void __iomem *ioaddr, struct stmmac_counters *mmc); +}; + +#define stmmac_mmc_ctrl(__priv, __args...) \ + stmmac_do_void_callback(__priv, mmc, ctrl, __args) +#define stmmac_mmc_intr_all_mask(__priv, __args...) \ + stmmac_do_void_callback(__priv, mmc, intr_all_mask, __args) +#define stmmac_mmc_read(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mmc, read, __args) + +struct stmmac_regs_off { + u32 ptp_off; + u32 mmc_off; +}; + +extern const struct stmmac_ops dwmac100_ops; +extern const struct stmmac_dma_ops dwmac100_dma_ops; +extern const struct stmmac_ops dwmac1000_ops; +extern const struct stmmac_dma_ops dwmac1000_dma_ops; +extern const struct stmmac_ops dwmac4_ops; +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_ops dwmac410_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; +extern const struct stmmac_ops dwmac510_ops; +extern const struct stmmac_tc_ops dwmac510_tc_ops; +extern const struct stmmac_ops dwxgmac210_ops; +extern const struct stmmac_ops dwxlgmac2_ops; +extern const struct stmmac_dma_ops dwxgmac210_dma_ops; +extern const struct stmmac_desc_ops dwxgmac210_desc_ops; +extern const struct stmmac_mmc_ops dwmac_mmc_ops; +extern const struct stmmac_mmc_ops dwxgmac_mmc_ops; + +#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ + +int stmmac_hwif_init(struct stmmac_priv *priv); + +#endif /* __STMMAC_HWIF_H__ */ diff --git a/devices/stmmac/mmc-6.1-ethercat.h b/devices/stmmac/mmc-6.1-ethercat.h new file mode 100644 index 00000000..a0c05925 --- /dev/null +++ b/devices/stmmac/mmc-6.1-ethercat.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + MMC Header file + + Copyright (C) 2011 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __MMC_H__ +#define __MMC_H__ + +/* MMC control register */ +/* When set, all counter are reset */ +#define MMC_CNTRL_COUNTER_RESET 0x1 +/* When set, do not roll over zero after reaching the max value*/ +#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2 +#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */ +#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the + * current value.*/ +#define MMC_CNTRL_PRESET 0x10 +#define MMC_CNTRL_FULL_HALF_PRESET 0x20 + +#define MMC_GMAC4_OFFSET 0x700 +#define MMC_GMAC3_X_OFFSET 0x100 +#define MMC_XGMAC_OFFSET 0x800 + +struct stmmac_counters { + unsigned int mmc_tx_octetcount_gb; + unsigned int mmc_tx_framecount_gb; + unsigned int mmc_tx_broadcastframe_g; + unsigned int mmc_tx_multicastframe_g; + unsigned int mmc_tx_64_octets_gb; + unsigned int mmc_tx_65_to_127_octets_gb; + unsigned int mmc_tx_128_to_255_octets_gb; + unsigned int mmc_tx_256_to_511_octets_gb; + unsigned int mmc_tx_512_to_1023_octets_gb; + unsigned int mmc_tx_1024_to_max_octets_gb; + unsigned int mmc_tx_unicast_gb; + unsigned int mmc_tx_multicast_gb; + unsigned int mmc_tx_broadcast_gb; + unsigned int mmc_tx_underflow_error; + unsigned int mmc_tx_singlecol_g; + unsigned int mmc_tx_multicol_g; + unsigned int mmc_tx_deferred; + unsigned int mmc_tx_latecol; + unsigned int mmc_tx_exesscol; + unsigned int mmc_tx_carrier_error; + unsigned int mmc_tx_octetcount_g; + unsigned int mmc_tx_framecount_g; + unsigned int mmc_tx_excessdef; + unsigned int mmc_tx_pause_frame; + unsigned int mmc_tx_vlan_frame_g; + + /* MMC RX counter registers */ + unsigned int mmc_rx_framecount_gb; + unsigned int mmc_rx_octetcount_gb; + unsigned int mmc_rx_octetcount_g; + unsigned int mmc_rx_broadcastframe_g; + unsigned int mmc_rx_multicastframe_g; + unsigned int mmc_rx_crc_error; + unsigned int mmc_rx_align_error; + unsigned int mmc_rx_run_error; + unsigned int mmc_rx_jabber_error; + 
unsigned int mmc_rx_undersize_g; + unsigned int mmc_rx_oversize_g; + unsigned int mmc_rx_64_octets_gb; + unsigned int mmc_rx_65_to_127_octets_gb; + unsigned int mmc_rx_128_to_255_octets_gb; + unsigned int mmc_rx_256_to_511_octets_gb; + unsigned int mmc_rx_512_to_1023_octets_gb; + unsigned int mmc_rx_1024_to_max_octets_gb; + unsigned int mmc_rx_unicast_g; + unsigned int mmc_rx_length_error; + unsigned int mmc_rx_autofrangetype; + unsigned int mmc_rx_pause_frames; + unsigned int mmc_rx_fifo_overflow; + unsigned int mmc_rx_vlan_frames_gb; + unsigned int mmc_rx_watchdog_error; + /* IPC */ + unsigned int mmc_rx_ipc_intr_mask; + unsigned int mmc_rx_ipc_intr; + /* IPv4 */ + unsigned int mmc_rx_ipv4_gd; + unsigned int mmc_rx_ipv4_hderr; + unsigned int mmc_rx_ipv4_nopay; + unsigned int mmc_rx_ipv4_frag; + unsigned int mmc_rx_ipv4_udsbl; + + unsigned int mmc_rx_ipv4_gd_octets; + unsigned int mmc_rx_ipv4_hderr_octets; + unsigned int mmc_rx_ipv4_nopay_octets; + unsigned int mmc_rx_ipv4_frag_octets; + unsigned int mmc_rx_ipv4_udsbl_octets; + + /* IPV6 */ + unsigned int mmc_rx_ipv6_gd_octets; + unsigned int mmc_rx_ipv6_hderr_octets; + unsigned int mmc_rx_ipv6_nopay_octets; + + unsigned int mmc_rx_ipv6_gd; + unsigned int mmc_rx_ipv6_hderr; + unsigned int mmc_rx_ipv6_nopay; + + /* Protocols */ + unsigned int mmc_rx_udp_gd; + unsigned int mmc_rx_udp_err; + unsigned int mmc_rx_tcp_gd; + unsigned int mmc_rx_tcp_err; + unsigned int mmc_rx_icmp_gd; + unsigned int mmc_rx_icmp_err; + + unsigned int mmc_rx_udp_gd_octets; + unsigned int mmc_rx_udp_err_octets; + unsigned int mmc_rx_tcp_gd_octets; + unsigned int mmc_rx_tcp_err_octets; + unsigned int mmc_rx_icmp_gd_octets; + unsigned int mmc_rx_icmp_err_octets; + + /* FPE */ + unsigned int mmc_tx_fpe_fragment_cntr; + unsigned int mmc_tx_hold_req_cntr; + unsigned int mmc_rx_packet_assembly_err_cntr; + unsigned int mmc_rx_packet_smd_err_cntr; + unsigned int mmc_rx_packet_assembly_ok_cntr; + unsigned int mmc_rx_fpe_fragment_cntr; +}; + +#endif /* __MMC_H__ */ diff --git a/devices/stmmac/mmc-6.1-orig.h b/devices/stmmac/mmc-6.1-orig.h new file mode 100644 index 00000000..a0c05925 --- /dev/null +++ b/devices/stmmac/mmc-6.1-orig.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + MMC Header file + + Copyright (C) 2011 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __MMC_H__ +#define __MMC_H__ + +/* MMC control register */ +/* When set, all counter are reset */ +#define MMC_CNTRL_COUNTER_RESET 0x1 +/* When set, do not roll over zero after reaching the max value*/ +#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2 +#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */ +#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the + * current value.*/ +#define MMC_CNTRL_PRESET 0x10 +#define MMC_CNTRL_FULL_HALF_PRESET 0x20 + +#define MMC_GMAC4_OFFSET 0x700 +#define MMC_GMAC3_X_OFFSET 0x100 +#define MMC_XGMAC_OFFSET 0x800 + +struct stmmac_counters { + unsigned int mmc_tx_octetcount_gb; + unsigned int mmc_tx_framecount_gb; + unsigned int mmc_tx_broadcastframe_g; + unsigned int mmc_tx_multicastframe_g; + unsigned int mmc_tx_64_octets_gb; + unsigned int mmc_tx_65_to_127_octets_gb; + unsigned int mmc_tx_128_to_255_octets_gb; + unsigned int mmc_tx_256_to_511_octets_gb; + unsigned int mmc_tx_512_to_1023_octets_gb; + unsigned int mmc_tx_1024_to_max_octets_gb; + unsigned int 
mmc_tx_unicast_gb; + unsigned int mmc_tx_multicast_gb; + unsigned int mmc_tx_broadcast_gb; + unsigned int mmc_tx_underflow_error; + unsigned int mmc_tx_singlecol_g; + unsigned int mmc_tx_multicol_g; + unsigned int mmc_tx_deferred; + unsigned int mmc_tx_latecol; + unsigned int mmc_tx_exesscol; + unsigned int mmc_tx_carrier_error; + unsigned int mmc_tx_octetcount_g; + unsigned int mmc_tx_framecount_g; + unsigned int mmc_tx_excessdef; + unsigned int mmc_tx_pause_frame; + unsigned int mmc_tx_vlan_frame_g; + + /* MMC RX counter registers */ + unsigned int mmc_rx_framecount_gb; + unsigned int mmc_rx_octetcount_gb; + unsigned int mmc_rx_octetcount_g; + unsigned int mmc_rx_broadcastframe_g; + unsigned int mmc_rx_multicastframe_g; + unsigned int mmc_rx_crc_error; + unsigned int mmc_rx_align_error; + unsigned int mmc_rx_run_error; + unsigned int mmc_rx_jabber_error; + unsigned int mmc_rx_undersize_g; + unsigned int mmc_rx_oversize_g; + unsigned int mmc_rx_64_octets_gb; + unsigned int mmc_rx_65_to_127_octets_gb; + unsigned int mmc_rx_128_to_255_octets_gb; + unsigned int mmc_rx_256_to_511_octets_gb; + unsigned int mmc_rx_512_to_1023_octets_gb; + unsigned int mmc_rx_1024_to_max_octets_gb; + unsigned int mmc_rx_unicast_g; + unsigned int mmc_rx_length_error; + unsigned int mmc_rx_autofrangetype; + unsigned int mmc_rx_pause_frames; + unsigned int mmc_rx_fifo_overflow; + unsigned int mmc_rx_vlan_frames_gb; + unsigned int mmc_rx_watchdog_error; + /* IPC */ + unsigned int mmc_rx_ipc_intr_mask; + unsigned int mmc_rx_ipc_intr; + /* IPv4 */ + unsigned int mmc_rx_ipv4_gd; + unsigned int mmc_rx_ipv4_hderr; + unsigned int mmc_rx_ipv4_nopay; + unsigned int mmc_rx_ipv4_frag; + unsigned int mmc_rx_ipv4_udsbl; + + unsigned int mmc_rx_ipv4_gd_octets; + unsigned int mmc_rx_ipv4_hderr_octets; + unsigned int mmc_rx_ipv4_nopay_octets; + unsigned int mmc_rx_ipv4_frag_octets; + unsigned int mmc_rx_ipv4_udsbl_octets; + + /* IPV6 */ + unsigned int mmc_rx_ipv6_gd_octets; + unsigned int mmc_rx_ipv6_hderr_octets; + unsigned int mmc_rx_ipv6_nopay_octets; + + unsigned int mmc_rx_ipv6_gd; + unsigned int mmc_rx_ipv6_hderr; + unsigned int mmc_rx_ipv6_nopay; + + /* Protocols */ + unsigned int mmc_rx_udp_gd; + unsigned int mmc_rx_udp_err; + unsigned int mmc_rx_tcp_gd; + unsigned int mmc_rx_tcp_err; + unsigned int mmc_rx_icmp_gd; + unsigned int mmc_rx_icmp_err; + + unsigned int mmc_rx_udp_gd_octets; + unsigned int mmc_rx_udp_err_octets; + unsigned int mmc_rx_tcp_gd_octets; + unsigned int mmc_rx_tcp_err_octets; + unsigned int mmc_rx_icmp_gd_octets; + unsigned int mmc_rx_icmp_err_octets; + + /* FPE */ + unsigned int mmc_tx_fpe_fragment_cntr; + unsigned int mmc_tx_hold_req_cntr; + unsigned int mmc_rx_packet_assembly_err_cntr; + unsigned int mmc_rx_packet_smd_err_cntr; + unsigned int mmc_rx_packet_assembly_ok_cntr; + unsigned int mmc_rx_fpe_fragment_cntr; +}; + +#endif /* __MMC_H__ */ diff --git a/devices/stmmac/mmc_core-6.1-ethercat.c b/devices/stmmac/mmc_core-6.1-ethercat.c new file mode 100644 index 00000000..b2c0b6fe --- /dev/null +++ b/devices/stmmac/mmc_core-6.1-ethercat.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + DWMAC Management Counters + + Copyright (C) 2011 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "hwif-6.1-ethercat.h" +#include "mmc-6.1-ethercat.h" + +/* MAC Management Counters register offset */ + 
+#define MMC_CNTRL 0x00 /* MMC Control */ +#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */ +#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */ +#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */ +#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */ +#define MMC_DEFAULT_MASK 0xffffffff + +/* MMC TX counter registers */ + +/* Note: + * _GB register stands for good and bad frames + * _G is for good only. + */ +#define MMC_TX_OCTETCOUNT_GB 0x14 +#define MMC_TX_FRAMECOUNT_GB 0x18 +#define MMC_TX_BROADCASTFRAME_G 0x1c +#define MMC_TX_MULTICASTFRAME_G 0x20 +#define MMC_TX_64_OCTETS_GB 0x24 +#define MMC_TX_65_TO_127_OCTETS_GB 0x28 +#define MMC_TX_128_TO_255_OCTETS_GB 0x2c +#define MMC_TX_256_TO_511_OCTETS_GB 0x30 +#define MMC_TX_512_TO_1023_OCTETS_GB 0x34 +#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38 +#define MMC_TX_UNICAST_GB 0x3c +#define MMC_TX_MULTICAST_GB 0x40 +#define MMC_TX_BROADCAST_GB 0x44 +#define MMC_TX_UNDERFLOW_ERROR 0x48 +#define MMC_TX_SINGLECOL_G 0x4c +#define MMC_TX_MULTICOL_G 0x50 +#define MMC_TX_DEFERRED 0x54 +#define MMC_TX_LATECOL 0x58 +#define MMC_TX_EXESSCOL 0x5c +#define MMC_TX_CARRIER_ERROR 0x60 +#define MMC_TX_OCTETCOUNT_G 0x64 +#define MMC_TX_FRAMECOUNT_G 0x68 +#define MMC_TX_EXCESSDEF 0x6c +#define MMC_TX_PAUSE_FRAME 0x70 +#define MMC_TX_VLAN_FRAME_G 0x74 + +/* MMC RX counter registers */ +#define MMC_RX_FRAMECOUNT_GB 0x80 +#define MMC_RX_OCTETCOUNT_GB 0x84 +#define MMC_RX_OCTETCOUNT_G 0x88 +#define MMC_RX_BROADCASTFRAME_G 0x8c +#define MMC_RX_MULTICASTFRAME_G 0x90 +#define MMC_RX_CRC_ERROR 0x94 +#define MMC_RX_ALIGN_ERROR 0x98 +#define MMC_RX_RUN_ERROR 0x9C +#define MMC_RX_JABBER_ERROR 0xA0 +#define MMC_RX_UNDERSIZE_G 0xA4 +#define MMC_RX_OVERSIZE_G 0xA8 +#define MMC_RX_64_OCTETS_GB 0xAC +#define MMC_RX_65_TO_127_OCTETS_GB 0xb0 +#define MMC_RX_128_TO_255_OCTETS_GB 0xb4 +#define MMC_RX_256_TO_511_OCTETS_GB 0xb8 +#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc +#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0 +#define MMC_RX_UNICAST_G 0xc4 +#define MMC_RX_LENGTH_ERROR 0xc8 +#define MMC_RX_AUTOFRANGETYPE 0xcc +#define MMC_RX_PAUSE_FRAMES 0xd0 +#define MMC_RX_FIFO_OVERFLOW 0xd4 +#define MMC_RX_VLAN_FRAMES_GB 0xd8 +#define MMC_RX_WATCHDOG_ERROR 0xdc +/* IPC*/ +#define MMC_RX_IPC_INTR_MASK 0x100 +#define MMC_RX_IPC_INTR 0x108 +/* IPv4*/ +#define MMC_RX_IPV4_GD 0x110 +#define MMC_RX_IPV4_HDERR 0x114 +#define MMC_RX_IPV4_NOPAY 0x118 +#define MMC_RX_IPV4_FRAG 0x11C +#define MMC_RX_IPV4_UDSBL 0x120 + +#define MMC_RX_IPV4_GD_OCTETS 0x150 +#define MMC_RX_IPV4_HDERR_OCTETS 0x154 +#define MMC_RX_IPV4_NOPAY_OCTETS 0x158 +#define MMC_RX_IPV4_FRAG_OCTETS 0x15c +#define MMC_RX_IPV4_UDSBL_OCTETS 0x160 + +/* IPV6*/ +#define MMC_RX_IPV6_GD_OCTETS 0x164 +#define MMC_RX_IPV6_HDERR_OCTETS 0x168 +#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c + +#define MMC_RX_IPV6_GD 0x124 +#define MMC_RX_IPV6_HDERR 0x128 +#define MMC_RX_IPV6_NOPAY 0x12c + +/* Protocols*/ +#define MMC_RX_UDP_GD 0x130 +#define MMC_RX_UDP_ERR 0x134 +#define MMC_RX_TCP_GD 0x138 +#define MMC_RX_TCP_ERR 0x13c +#define MMC_RX_ICMP_GD 0x140 +#define MMC_RX_ICMP_ERR 0x144 + +#define MMC_RX_UDP_GD_OCTETS 0x170 +#define MMC_RX_UDP_ERR_OCTETS 0x174 +#define MMC_RX_TCP_GD_OCTETS 0x178 +#define MMC_RX_TCP_ERR_OCTETS 0x17c +#define MMC_RX_ICMP_GD_OCTETS 0x180 +#define MMC_RX_ICMP_ERR_OCTETS 0x184 + +#define MMC_TX_FPE_FRAG 0x1a8 +#define MMC_TX_HOLD_REQ 0x1ac +#define MMC_RX_PKT_ASSEMBLY_ERR 0x1c8 +#define MMC_RX_PKT_SMD_ERR 0x1cc +#define MMC_RX_PKT_ASSEMBLY_OK 0x1d0 +#define MMC_RX_FPE_FRAG 0x1d4 + +/* XGMAC MMC Registers */ +#define MMC_XGMAC_TX_OCTET_GB 
0x14 +#define MMC_XGMAC_TX_PKT_GB 0x1c +#define MMC_XGMAC_TX_BROAD_PKT_G 0x24 +#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c +#define MMC_XGMAC_TX_64OCT_GB 0x34 +#define MMC_XGMAC_TX_65OCT_GB 0x3c +#define MMC_XGMAC_TX_128OCT_GB 0x44 +#define MMC_XGMAC_TX_256OCT_GB 0x4c +#define MMC_XGMAC_TX_512OCT_GB 0x54 +#define MMC_XGMAC_TX_1024OCT_GB 0x5c +#define MMC_XGMAC_TX_UNI_PKT_GB 0x64 +#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c +#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74 +#define MMC_XGMAC_TX_UNDER 0x7c +#define MMC_XGMAC_TX_OCTET_G 0x84 +#define MMC_XGMAC_TX_PKT_G 0x8c +#define MMC_XGMAC_TX_PAUSE 0x94 +#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c +#define MMC_XGMAC_TX_LPI_USEC 0xa4 +#define MMC_XGMAC_TX_LPI_TRAN 0xa8 + +#define MMC_XGMAC_RX_PKT_GB 0x100 +#define MMC_XGMAC_RX_OCTET_GB 0x108 +#define MMC_XGMAC_RX_OCTET_G 0x110 +#define MMC_XGMAC_RX_BROAD_PKT_G 0x118 +#define MMC_XGMAC_RX_MULTI_PKT_G 0x120 +#define MMC_XGMAC_RX_CRC_ERR 0x128 +#define MMC_XGMAC_RX_RUNT_ERR 0x130 +#define MMC_XGMAC_RX_JABBER_ERR 0x134 +#define MMC_XGMAC_RX_UNDER 0x138 +#define MMC_XGMAC_RX_OVER 0x13c +#define MMC_XGMAC_RX_64OCT_GB 0x140 +#define MMC_XGMAC_RX_65OCT_GB 0x148 +#define MMC_XGMAC_RX_128OCT_GB 0x150 +#define MMC_XGMAC_RX_256OCT_GB 0x158 +#define MMC_XGMAC_RX_512OCT_GB 0x160 +#define MMC_XGMAC_RX_1024OCT_GB 0x168 +#define MMC_XGMAC_RX_UNI_PKT_G 0x170 +#define MMC_XGMAC_RX_LENGTH_ERR 0x178 +#define MMC_XGMAC_RX_RANGE 0x180 +#define MMC_XGMAC_RX_PAUSE 0x188 +#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190 +#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198 +#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0 +#define MMC_XGMAC_RX_LPI_USEC 0x1a4 +#define MMC_XGMAC_RX_LPI_TRAN 0x1a8 +#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac +#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4 +#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc + +#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204 +#define MMC_XGMAC_TX_FPE_FRAG 0x208 +#define MMC_XGMAC_TX_HOLD_REQ 0x20c +#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224 +#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228 +#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c +#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 +#define MMC_XGMAC_RX_FPE_FRAG 0x234 +#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c + +static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) +{ + u32 value = readl(mmcaddr + MMC_CNTRL); + + value |= (mode & 0x3F); + + writel(value, mmcaddr + MMC_CNTRL); + + pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", + MMC_CNTRL, value); +} + +/* To mask all interrupts.*/ +static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr) +{ + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK); +} + +/* This reads the MAC core counters (if actaully supported). + * by default the MMC core is programmed to reset each + * counter after a read. So all the field of the mmc struct + * have to be incremented. 
+ */ +static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc) +{ + mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB); + mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB); + mmc->mmc_tx_broadcastframe_g += readl(mmcaddr + + MMC_TX_BROADCASTFRAME_G); + mmc->mmc_tx_multicastframe_g += readl(mmcaddr + + MMC_TX_MULTICASTFRAME_G); + mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB); + mmc->mmc_tx_65_to_127_octets_gb += + readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB); + mmc->mmc_tx_128_to_255_octets_gb += + readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB); + mmc->mmc_tx_256_to_511_octets_gb += + readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB); + mmc->mmc_tx_512_to_1023_octets_gb += + readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB); + mmc->mmc_tx_1024_to_max_octets_gb += + readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB); + mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB); + mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB); + mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR); + mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G); + mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G); + mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED); + mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL); + mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL); + mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR); + mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G); + mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G); + mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF); + mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME); + mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G); + + /* MMC RX counter registers */ + mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB); + mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB); + mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G); + mmc->mmc_rx_broadcastframe_g += readl(mmcaddr + + MMC_RX_BROADCASTFRAME_G); + mmc->mmc_rx_multicastframe_g += readl(mmcaddr + + MMC_RX_MULTICASTFRAME_G); + mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR); + mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR); + mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR); + mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR); + mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G); + mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G); + mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB); + mmc->mmc_rx_65_to_127_octets_gb += + readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB); + mmc->mmc_rx_128_to_255_octets_gb += + readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB); + mmc->mmc_rx_256_to_511_octets_gb += + readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB); + mmc->mmc_rx_512_to_1023_octets_gb += + readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB); + mmc->mmc_rx_1024_to_max_octets_gb += + readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G); + mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR); + mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE); + mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES); + mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW); + mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + 
MMC_RX_VLAN_FRAMES_GB); + mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR); + /* IPC */ + mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK); + mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR); + /* IPv4 */ + mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD); + mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR); + mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY); + mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG); + mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL); + + mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS); + mmc->mmc_rx_ipv4_hderr_octets += + readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS); + mmc->mmc_rx_ipv4_nopay_octets += + readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS); + mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr + + MMC_RX_IPV4_FRAG_OCTETS); + mmc->mmc_rx_ipv4_udsbl_octets += + readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS); + + /* IPV6 */ + mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS); + mmc->mmc_rx_ipv6_hderr_octets += + readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS); + mmc->mmc_rx_ipv6_nopay_octets += + readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS); + + mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD); + mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR); + mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY); + + /* Protocols */ + mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD); + mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR); + mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD); + mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR); + mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD); + mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR); + + mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS); + mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS); + mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS); + mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS); + mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS); + mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS); + + mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_TX_FPE_FRAG); + mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_TX_HOLD_REQ); + mmc->mmc_rx_packet_assembly_err_cntr += + readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_ERR); + mmc->mmc_rx_packet_smd_err_cntr += readl(mmcaddr + MMC_RX_PKT_SMD_ERR); + mmc->mmc_rx_packet_assembly_ok_cntr += + readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_OK); + mmc->mmc_rx_fpe_fragment_cntr += readl(mmcaddr + MMC_RX_FPE_FRAG); +} + +const struct stmmac_mmc_ops dwmac_mmc_ops = { + .ctrl = dwmac_mmc_ctrl, + .intr_all_mask = dwmac_mmc_intr_all_mask, + .read = dwmac_mmc_read, +}; + +static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) +{ + u32 value = readl(mmcaddr + MMC_CNTRL); + + value |= (mode & 0x3F); + + writel(value, mmcaddr + MMC_CNTRL); +} + +static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) +{ + writel(0x0, mmcaddr + MMC_RX_INTR_MASK); + writel(0x0, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK); +} + +static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest) +{ + u64 tmp = 0; + + tmp += readl(addr + reg); + tmp += ((u64 )readl(addr + reg + 0x4)) << 32; + if (tmp > GENMASK(31, 0)) + *dest = ~0x0; + 
else + *dest = *dest + tmp; +} + +/* This reads the MAC core counters (if actaully supported). + * by default the MMC core is programmed to reset each + * counter after a read. So all the field of the mmc struct + * have to be incremented. + */ +static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc) +{ + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB, + &mmc->mmc_tx_octetcount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB, + &mmc->mmc_tx_framecount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G, + &mmc->mmc_tx_broadcastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G, + &mmc->mmc_tx_multicastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB, + &mmc->mmc_tx_64_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB, + &mmc->mmc_tx_65_to_127_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB, + &mmc->mmc_tx_128_to_255_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB, + &mmc->mmc_tx_256_to_511_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB, + &mmc->mmc_tx_512_to_1023_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB, + &mmc->mmc_tx_1024_to_max_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB, + &mmc->mmc_tx_unicast_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB, + &mmc->mmc_tx_multicast_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB, + &mmc->mmc_tx_broadcast_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER, + &mmc->mmc_tx_underflow_error); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G, + &mmc->mmc_tx_octetcount_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G, + &mmc->mmc_tx_framecount_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE, + &mmc->mmc_tx_pause_frame); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G, + &mmc->mmc_tx_vlan_frame_g); + + /* MMC RX counter registers */ + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB, + &mmc->mmc_rx_framecount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB, + &mmc->mmc_rx_octetcount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G, + &mmc->mmc_rx_octetcount_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G, + &mmc->mmc_rx_broadcastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G, + &mmc->mmc_rx_multicastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR, + &mmc->mmc_rx_crc_error); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR, + &mmc->mmc_rx_crc_error); + mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR); + mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR); + mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER); + mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB, + &mmc->mmc_rx_64_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB, + &mmc->mmc_rx_65_to_127_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB, + &mmc->mmc_rx_128_to_255_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB, + &mmc->mmc_rx_256_to_511_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB, + &mmc->mmc_rx_512_to_1023_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB, + &mmc->mmc_rx_1024_to_max_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G, + &mmc->mmc_rx_unicast_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR, + &mmc->mmc_rx_length_error); + 
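Note on the counter-read helpers above: the hardware MMC counters are, per the comment, cleared on each read, so every readl() result is accumulated into a wider software field, and the XGMAC helper dwxgmac_read_mmc_reg() additionally reads each counter as two consecutive 32-bit words and saturates the 32-bit destination when the value read no longer fits. The following is a minimal user-space sketch of that accumulate-and-saturate logic, not part of the patch; the names and the fake register array are illustrative only.

        /* Sketch: accumulate read-to-clear 32-bit hardware counters into a
         * software counter, saturating the 32-bit field the way
         * dwxgmac_read_mmc_reg() above does.  Illustrative names only. */
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t fake_regs[2] = { 0xfffffff0u, 0x1u };  /* low word, high word */

        /* Model of a read-to-clear register: return the value, then clear it. */
        static uint32_t read_clear(uint32_t *reg)
        {
                uint32_t v = *reg;
                *reg = 0;
                return v;
        }

        /* Widened read of a counter split over two 32-bit registers; saturate
         * the 32-bit destination when the 64-bit value exceeds 32 bits. */
        static void accumulate_sat32(uint32_t *dest, uint32_t *lo, uint32_t *hi)
        {
                uint64_t tmp = (uint64_t)read_clear(lo) |
                               ((uint64_t)read_clear(hi) << 32);

                if (tmp > 0xffffffffull)
                        *dest = 0xffffffffu;    /* saturate, as the driver does */
                else
                        *dest += (uint32_t)tmp; /* otherwise keep accumulating */
        }

        int main(void)
        {
                uint32_t sw_counter = 0;

                accumulate_sat32(&sw_counter, &fake_regs[0], &fake_regs[1]);
                printf("counter = 0x%08x\n", sw_counter);  /* saturated: 0xffffffff */
                return 0;
        }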
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE, + &mmc->mmc_rx_autofrangetype); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE, + &mmc->mmc_rx_pause_frames); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT, + &mmc->mmc_rx_fifo_overflow); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB, + &mmc->mmc_rx_vlan_frames_gb); + mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR); + + mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG); + mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ); + mmc->mmc_rx_packet_assembly_err_cntr += + readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR); + mmc->mmc_rx_packet_smd_err_cntr += + readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR); + mmc->mmc_rx_packet_assembly_ok_cntr += + readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK); + mmc->mmc_rx_fpe_fragment_cntr += + readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG); +} + +const struct stmmac_mmc_ops dwxgmac_mmc_ops = { + .ctrl = dwxgmac_mmc_ctrl, + .intr_all_mask = dwxgmac_mmc_intr_all_mask, + .read = dwxgmac_mmc_read, +}; diff --git a/devices/stmmac/mmc_core-6.1-orig.c b/devices/stmmac/mmc_core-6.1-orig.c new file mode 100644 index 00000000..6a7c1d32 --- /dev/null +++ b/devices/stmmac/mmc_core-6.1-orig.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + DWMAC Management Counters + + Copyright (C) 2011 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "hwif.h" +#include "mmc.h" + +/* MAC Management Counters register offset */ + +#define MMC_CNTRL 0x00 /* MMC Control */ +#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */ +#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */ +#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */ +#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */ +#define MMC_DEFAULT_MASK 0xffffffff + +/* MMC TX counter registers */ + +/* Note: + * _GB register stands for good and bad frames + * _G is for good only. 
+ */ +#define MMC_TX_OCTETCOUNT_GB 0x14 +#define MMC_TX_FRAMECOUNT_GB 0x18 +#define MMC_TX_BROADCASTFRAME_G 0x1c +#define MMC_TX_MULTICASTFRAME_G 0x20 +#define MMC_TX_64_OCTETS_GB 0x24 +#define MMC_TX_65_TO_127_OCTETS_GB 0x28 +#define MMC_TX_128_TO_255_OCTETS_GB 0x2c +#define MMC_TX_256_TO_511_OCTETS_GB 0x30 +#define MMC_TX_512_TO_1023_OCTETS_GB 0x34 +#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38 +#define MMC_TX_UNICAST_GB 0x3c +#define MMC_TX_MULTICAST_GB 0x40 +#define MMC_TX_BROADCAST_GB 0x44 +#define MMC_TX_UNDERFLOW_ERROR 0x48 +#define MMC_TX_SINGLECOL_G 0x4c +#define MMC_TX_MULTICOL_G 0x50 +#define MMC_TX_DEFERRED 0x54 +#define MMC_TX_LATECOL 0x58 +#define MMC_TX_EXESSCOL 0x5c +#define MMC_TX_CARRIER_ERROR 0x60 +#define MMC_TX_OCTETCOUNT_G 0x64 +#define MMC_TX_FRAMECOUNT_G 0x68 +#define MMC_TX_EXCESSDEF 0x6c +#define MMC_TX_PAUSE_FRAME 0x70 +#define MMC_TX_VLAN_FRAME_G 0x74 + +/* MMC RX counter registers */ +#define MMC_RX_FRAMECOUNT_GB 0x80 +#define MMC_RX_OCTETCOUNT_GB 0x84 +#define MMC_RX_OCTETCOUNT_G 0x88 +#define MMC_RX_BROADCASTFRAME_G 0x8c +#define MMC_RX_MULTICASTFRAME_G 0x90 +#define MMC_RX_CRC_ERROR 0x94 +#define MMC_RX_ALIGN_ERROR 0x98 +#define MMC_RX_RUN_ERROR 0x9C +#define MMC_RX_JABBER_ERROR 0xA0 +#define MMC_RX_UNDERSIZE_G 0xA4 +#define MMC_RX_OVERSIZE_G 0xA8 +#define MMC_RX_64_OCTETS_GB 0xAC +#define MMC_RX_65_TO_127_OCTETS_GB 0xb0 +#define MMC_RX_128_TO_255_OCTETS_GB 0xb4 +#define MMC_RX_256_TO_511_OCTETS_GB 0xb8 +#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc +#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0 +#define MMC_RX_UNICAST_G 0xc4 +#define MMC_RX_LENGTH_ERROR 0xc8 +#define MMC_RX_AUTOFRANGETYPE 0xcc +#define MMC_RX_PAUSE_FRAMES 0xd0 +#define MMC_RX_FIFO_OVERFLOW 0xd4 +#define MMC_RX_VLAN_FRAMES_GB 0xd8 +#define MMC_RX_WATCHDOG_ERROR 0xdc +/* IPC*/ +#define MMC_RX_IPC_INTR_MASK 0x100 +#define MMC_RX_IPC_INTR 0x108 +/* IPv4*/ +#define MMC_RX_IPV4_GD 0x110 +#define MMC_RX_IPV4_HDERR 0x114 +#define MMC_RX_IPV4_NOPAY 0x118 +#define MMC_RX_IPV4_FRAG 0x11C +#define MMC_RX_IPV4_UDSBL 0x120 + +#define MMC_RX_IPV4_GD_OCTETS 0x150 +#define MMC_RX_IPV4_HDERR_OCTETS 0x154 +#define MMC_RX_IPV4_NOPAY_OCTETS 0x158 +#define MMC_RX_IPV4_FRAG_OCTETS 0x15c +#define MMC_RX_IPV4_UDSBL_OCTETS 0x160 + +/* IPV6*/ +#define MMC_RX_IPV6_GD_OCTETS 0x164 +#define MMC_RX_IPV6_HDERR_OCTETS 0x168 +#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c + +#define MMC_RX_IPV6_GD 0x124 +#define MMC_RX_IPV6_HDERR 0x128 +#define MMC_RX_IPV6_NOPAY 0x12c + +/* Protocols*/ +#define MMC_RX_UDP_GD 0x130 +#define MMC_RX_UDP_ERR 0x134 +#define MMC_RX_TCP_GD 0x138 +#define MMC_RX_TCP_ERR 0x13c +#define MMC_RX_ICMP_GD 0x140 +#define MMC_RX_ICMP_ERR 0x144 + +#define MMC_RX_UDP_GD_OCTETS 0x170 +#define MMC_RX_UDP_ERR_OCTETS 0x174 +#define MMC_RX_TCP_GD_OCTETS 0x178 +#define MMC_RX_TCP_ERR_OCTETS 0x17c +#define MMC_RX_ICMP_GD_OCTETS 0x180 +#define MMC_RX_ICMP_ERR_OCTETS 0x184 + +#define MMC_TX_FPE_FRAG 0x1a8 +#define MMC_TX_HOLD_REQ 0x1ac +#define MMC_RX_PKT_ASSEMBLY_ERR 0x1c8 +#define MMC_RX_PKT_SMD_ERR 0x1cc +#define MMC_RX_PKT_ASSEMBLY_OK 0x1d0 +#define MMC_RX_FPE_FRAG 0x1d4 + +/* XGMAC MMC Registers */ +#define MMC_XGMAC_TX_OCTET_GB 0x14 +#define MMC_XGMAC_TX_PKT_GB 0x1c +#define MMC_XGMAC_TX_BROAD_PKT_G 0x24 +#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c +#define MMC_XGMAC_TX_64OCT_GB 0x34 +#define MMC_XGMAC_TX_65OCT_GB 0x3c +#define MMC_XGMAC_TX_128OCT_GB 0x44 +#define MMC_XGMAC_TX_256OCT_GB 0x4c +#define MMC_XGMAC_TX_512OCT_GB 0x54 +#define MMC_XGMAC_TX_1024OCT_GB 0x5c +#define MMC_XGMAC_TX_UNI_PKT_GB 0x64 +#define MMC_XGMAC_TX_MULTI_PKT_GB 
0x6c +#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74 +#define MMC_XGMAC_TX_UNDER 0x7c +#define MMC_XGMAC_TX_OCTET_G 0x84 +#define MMC_XGMAC_TX_PKT_G 0x8c +#define MMC_XGMAC_TX_PAUSE 0x94 +#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c +#define MMC_XGMAC_TX_LPI_USEC 0xa4 +#define MMC_XGMAC_TX_LPI_TRAN 0xa8 + +#define MMC_XGMAC_RX_PKT_GB 0x100 +#define MMC_XGMAC_RX_OCTET_GB 0x108 +#define MMC_XGMAC_RX_OCTET_G 0x110 +#define MMC_XGMAC_RX_BROAD_PKT_G 0x118 +#define MMC_XGMAC_RX_MULTI_PKT_G 0x120 +#define MMC_XGMAC_RX_CRC_ERR 0x128 +#define MMC_XGMAC_RX_RUNT_ERR 0x130 +#define MMC_XGMAC_RX_JABBER_ERR 0x134 +#define MMC_XGMAC_RX_UNDER 0x138 +#define MMC_XGMAC_RX_OVER 0x13c +#define MMC_XGMAC_RX_64OCT_GB 0x140 +#define MMC_XGMAC_RX_65OCT_GB 0x148 +#define MMC_XGMAC_RX_128OCT_GB 0x150 +#define MMC_XGMAC_RX_256OCT_GB 0x158 +#define MMC_XGMAC_RX_512OCT_GB 0x160 +#define MMC_XGMAC_RX_1024OCT_GB 0x168 +#define MMC_XGMAC_RX_UNI_PKT_G 0x170 +#define MMC_XGMAC_RX_LENGTH_ERR 0x178 +#define MMC_XGMAC_RX_RANGE 0x180 +#define MMC_XGMAC_RX_PAUSE 0x188 +#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190 +#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198 +#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0 +#define MMC_XGMAC_RX_LPI_USEC 0x1a4 +#define MMC_XGMAC_RX_LPI_TRAN 0x1a8 +#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac +#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4 +#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc + +#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204 +#define MMC_XGMAC_TX_FPE_FRAG 0x208 +#define MMC_XGMAC_TX_HOLD_REQ 0x20c +#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224 +#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228 +#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c +#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 +#define MMC_XGMAC_RX_FPE_FRAG 0x234 +#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c + +static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) +{ + u32 value = readl(mmcaddr + MMC_CNTRL); + + value |= (mode & 0x3F); + + writel(value, mmcaddr + MMC_CNTRL); + + pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", + MMC_CNTRL, value); +} + +/* To mask all interrupts.*/ +static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr) +{ + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK); +} + +/* This reads the MAC core counters (if actaully supported). + * by default the MMC core is programmed to reset each + * counter after a read. So all the field of the mmc struct + * have to be incremented. 
+ */ +static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc) +{ + mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB); + mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB); + mmc->mmc_tx_broadcastframe_g += readl(mmcaddr + + MMC_TX_BROADCASTFRAME_G); + mmc->mmc_tx_multicastframe_g += readl(mmcaddr + + MMC_TX_MULTICASTFRAME_G); + mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB); + mmc->mmc_tx_65_to_127_octets_gb += + readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB); + mmc->mmc_tx_128_to_255_octets_gb += + readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB); + mmc->mmc_tx_256_to_511_octets_gb += + readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB); + mmc->mmc_tx_512_to_1023_octets_gb += + readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB); + mmc->mmc_tx_1024_to_max_octets_gb += + readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB); + mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB); + mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB); + mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR); + mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G); + mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G); + mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED); + mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL); + mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL); + mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR); + mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G); + mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G); + mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF); + mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME); + mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G); + + /* MMC RX counter registers */ + mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB); + mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB); + mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G); + mmc->mmc_rx_broadcastframe_g += readl(mmcaddr + + MMC_RX_BROADCASTFRAME_G); + mmc->mmc_rx_multicastframe_g += readl(mmcaddr + + MMC_RX_MULTICASTFRAME_G); + mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR); + mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR); + mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR); + mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR); + mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G); + mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G); + mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB); + mmc->mmc_rx_65_to_127_octets_gb += + readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB); + mmc->mmc_rx_128_to_255_octets_gb += + readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB); + mmc->mmc_rx_256_to_511_octets_gb += + readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB); + mmc->mmc_rx_512_to_1023_octets_gb += + readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB); + mmc->mmc_rx_1024_to_max_octets_gb += + readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G); + mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR); + mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE); + mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES); + mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW); + mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + 
MMC_RX_VLAN_FRAMES_GB); + mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR); + /* IPC */ + mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK); + mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR); + /* IPv4 */ + mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD); + mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR); + mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY); + mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG); + mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL); + + mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS); + mmc->mmc_rx_ipv4_hderr_octets += + readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS); + mmc->mmc_rx_ipv4_nopay_octets += + readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS); + mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr + + MMC_RX_IPV4_FRAG_OCTETS); + mmc->mmc_rx_ipv4_udsbl_octets += + readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS); + + /* IPV6 */ + mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS); + mmc->mmc_rx_ipv6_hderr_octets += + readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS); + mmc->mmc_rx_ipv6_nopay_octets += + readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS); + + mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD); + mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR); + mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY); + + /* Protocols */ + mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD); + mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR); + mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD); + mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR); + mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD); + mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR); + + mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS); + mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS); + mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS); + mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS); + mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS); + mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS); + + mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_TX_FPE_FRAG); + mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_TX_HOLD_REQ); + mmc->mmc_rx_packet_assembly_err_cntr += + readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_ERR); + mmc->mmc_rx_packet_smd_err_cntr += readl(mmcaddr + MMC_RX_PKT_SMD_ERR); + mmc->mmc_rx_packet_assembly_ok_cntr += + readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_OK); + mmc->mmc_rx_fpe_fragment_cntr += readl(mmcaddr + MMC_RX_FPE_FRAG); +} + +const struct stmmac_mmc_ops dwmac_mmc_ops = { + .ctrl = dwmac_mmc_ctrl, + .intr_all_mask = dwmac_mmc_intr_all_mask, + .read = dwmac_mmc_read, +}; + +static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) +{ + u32 value = readl(mmcaddr + MMC_CNTRL); + + value |= (mode & 0x3F); + + writel(value, mmcaddr + MMC_CNTRL); +} + +static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) +{ + writel(0x0, mmcaddr + MMC_RX_INTR_MASK); + writel(0x0, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK); +} + +static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest) +{ + u64 tmp = 0; + + tmp += readl(addr + reg); + tmp += ((u64 )readl(addr + reg + 0x4)) << 32; + if (tmp > GENMASK(31, 0)) + *dest = ~0x0; + 
else + *dest = *dest + tmp; +} + +/* This reads the MAC core counters (if actaully supported). + * by default the MMC core is programmed to reset each + * counter after a read. So all the field of the mmc struct + * have to be incremented. + */ +static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc) +{ + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB, + &mmc->mmc_tx_octetcount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB, + &mmc->mmc_tx_framecount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G, + &mmc->mmc_tx_broadcastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G, + &mmc->mmc_tx_multicastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB, + &mmc->mmc_tx_64_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB, + &mmc->mmc_tx_65_to_127_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB, + &mmc->mmc_tx_128_to_255_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB, + &mmc->mmc_tx_256_to_511_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB, + &mmc->mmc_tx_512_to_1023_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB, + &mmc->mmc_tx_1024_to_max_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB, + &mmc->mmc_tx_unicast_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB, + &mmc->mmc_tx_multicast_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB, + &mmc->mmc_tx_broadcast_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER, + &mmc->mmc_tx_underflow_error); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G, + &mmc->mmc_tx_octetcount_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G, + &mmc->mmc_tx_framecount_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE, + &mmc->mmc_tx_pause_frame); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G, + &mmc->mmc_tx_vlan_frame_g); + + /* MMC RX counter registers */ + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB, + &mmc->mmc_rx_framecount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB, + &mmc->mmc_rx_octetcount_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G, + &mmc->mmc_rx_octetcount_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G, + &mmc->mmc_rx_broadcastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G, + &mmc->mmc_rx_multicastframe_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR, + &mmc->mmc_rx_crc_error); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR, + &mmc->mmc_rx_crc_error); + mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR); + mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR); + mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER); + mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB, + &mmc->mmc_rx_64_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB, + &mmc->mmc_rx_65_to_127_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB, + &mmc->mmc_rx_128_to_255_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB, + &mmc->mmc_rx_256_to_511_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB, + &mmc->mmc_rx_512_to_1023_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB, + &mmc->mmc_rx_1024_to_max_octets_gb); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G, + &mmc->mmc_rx_unicast_g); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR, + &mmc->mmc_rx_length_error); + 
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE, + &mmc->mmc_rx_autofrangetype); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE, + &mmc->mmc_rx_pause_frames); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT, + &mmc->mmc_rx_fifo_overflow); + dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB, + &mmc->mmc_rx_vlan_frames_gb); + mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR); + + mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG); + mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ); + mmc->mmc_rx_packet_assembly_err_cntr += + readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR); + mmc->mmc_rx_packet_smd_err_cntr += + readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR); + mmc->mmc_rx_packet_assembly_ok_cntr += + readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK); + mmc->mmc_rx_fpe_fragment_cntr += + readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG); +} + +const struct stmmac_mmc_ops dwxgmac_mmc_ops = { + .ctrl = dwxgmac_mmc_ctrl, + .intr_all_mask = dwxgmac_mmc_intr_all_mask, + .read = dwxgmac_mmc_read, +}; diff --git a/devices/stmmac/norm_desc-6.1-ethercat.c b/devices/stmmac/norm_desc-6.1-ethercat.c new file mode 100644 index 00000000..ec8e8ef1 --- /dev/null +++ b/devices/stmmac/norm_desc-6.1-ethercat.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This contains the functions to handle the normal descriptors. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "common-6.1-ethercat.h" +#include "descs_com-6.1-ethercat.h" + +static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = le32_to_cpu(p->des0); + unsigned int tdes1 = le32_to_cpu(p->des1); + int ret = tx_done; + + /* Get tx owner first */ + if (unlikely(tdes0 & TDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. */ + if (likely(!(tdes1 & TDES1_LAST_SEGMENT))) + return tx_not_ls; + + if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) { + x->tx_underflow++; + stats->tx_fifo_errors++; + } + if (unlikely(tdes0 & TDES0_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) || + (tdes0 & TDES0_EXCESSIVE_COLLISIONS) || + (tdes0 & TDES0_LATE_COLLISION))) { + unsigned int collisions; + + collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3; + stats->collisions += collisions; + } + ret = tx_err; + } + + if (tdes0 & TDES0_VLAN_FRAME) + x->tx_vlan++; + + if (unlikely(tdes0 & TDES0_DEFERRED)) + x->tx_deferred++; + + return ret; +} + +static int ndesc_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK); +} + +/* This function verifies if each incoming frame has some errors + * and, if required, updates the multicast statistics. + * In case of success, it returns good_frame because the GMAC device + * is supposed to be able to compute the csum in HW. 
*/ +static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = good_frame; + unsigned int rdes0 = le32_to_cpu(p->des0); + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; + + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) + x->rx_desc++; + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) + x->sa_filter_fail++; + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) + x->overflow_error++; + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) + x->ipc_csum_error++; + if (unlikely(rdes0 & RDES0_COLLISION)) { + x->rx_collision++; + stats->collisions++; + } + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { + x->rx_crc_errors++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_DRIBBLING)) + x->dribbling_bit++; + + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { + x->rx_length++; + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_MII_ERROR)) { + x->rx_mii++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (rdes0 & RDES0_VLAN_TAG) + x->vlan_tag++; +#endif + return ret; +} + +static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, + int end, int bfsize) +{ + int bfsize1; + + p->des0 |= cpu_to_le32(RDES0_OWN); + + bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK); + + if (mode == STMMAC_CHAIN_MODE) + ndesc_rx_set_on_chain(p, end); + else + ndesc_rx_set_on_ring(p, end, bfsize); + + if (disable_rx_ic) + p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); +} + +static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 &= cpu_to_le32(~TDES0_OWN); + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p); + else + ndesc_end_tx_desc_on_ring(p, end); +} + +static int ndesc_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31; +} + +static void ndesc_set_tx_owner(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(TDES0_OWN); +} + +static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des0 |= cpu_to_le32(RDES0_OWN); +} + +static int ndesc_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30; +} + +static void ndesc_release_tx_desc(struct dma_desc *p, int mode) +{ + int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p); + else + ndesc_end_tx_desc_on_ring(p, ter); +} + +static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes1 = le32_to_cpu(p->des1); + + if (is_fs) + tdes1 |= TDES1_FIRST_SEGMENT; + else + tdes1 &= ~TDES1_FIRST_SEGMENT; + + if (likely(csum_flag)) + tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT; + else + tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT); + + if (ls) + tdes1 |= TDES1_LAST_SEGMENT; + + p->des1 = cpu_to_le32(tdes1); + + if (mode == STMMAC_CHAIN_MODE) + norm_set_tx_desc_len_on_chain(p, len); + else + norm_set_tx_desc_len_on_ring(p, len); + + if (tx_own) + p->des0 |= cpu_to_le32(TDES0_OWN); +} + +static void ndesc_set_tx_ic(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(TDES1_INTERRUPT); +} + +static int ndesc_get_rx_frame_len(struct dma_desc *p, int 
rx_coe_type) +{ + unsigned int csum = 0; + + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines + */ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + csum = 2; + + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK) + >> RDES0_FRAME_LEN_SHIFT) - + csum); + +} + +static void ndesc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE); +} + +static int ndesc_get_tx_timestamp_status(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17; +} + +static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = le32_to_cpu(p->des2); + /* convert high/sec time stamp value to nanosecond */ + ns += le32_to_cpu(p->des3) * 1000000000ULL; + + *ts = ns; +} + +static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + + if ((le32_to_cpu(p->des2) == 0xffffffff) && + (le32_to_cpu(p->des3) == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; +} + +static void ndesc_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) +{ + struct dma_desc *p = (struct dma_desc *)head; + dma_addr_t dma_addr; + int i; + + pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + dma_addr = dma_rx_phy + i * sizeof(*p); + + x = *(u64 *)p; + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x", + i, &dma_addr, + (unsigned int)x, (unsigned int)(x >> 32), + p->des2, p->des3); + p++; + } + pr_info("\n"); +} + +static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + +static void ndesc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + +const struct stmmac_desc_ops ndesc_ops = { + .tx_status = ndesc_get_tx_status, + .rx_status = ndesc_get_rx_status, + .get_tx_len = ndesc_get_tx_len, + .init_rx_desc = ndesc_init_rx_desc, + .init_tx_desc = ndesc_init_tx_desc, + .get_tx_owner = ndesc_get_tx_owner, + .release_tx_desc = ndesc_release_tx_desc, + .prepare_tx_desc = ndesc_prepare_tx_desc, + .set_tx_ic = ndesc_set_tx_ic, + .get_tx_ls = ndesc_get_tx_ls, + .set_tx_owner = ndesc_set_tx_owner, + .set_rx_owner = ndesc_set_rx_owner, + .get_rx_frame_len = ndesc_get_rx_frame_len, + .enable_tx_timestamp = ndesc_enable_tx_timestamp, + .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, + .get_timestamp = ndesc_get_timestamp, + .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, + .display_ring = ndesc_display_ring, + .set_addr = ndesc_set_addr, + .clear = ndesc_clear, +}; diff --git a/devices/stmmac/norm_desc-6.1-orig.c b/devices/stmmac/norm_desc-6.1-orig.c new file mode 100644 index 00000000..e3da4da2 --- /dev/null +++ b/devices/stmmac/norm_desc-6.1-orig.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This contains the functions to handle the normal descriptors. 
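Two of the normal-descriptor helpers above hide small pieces of arithmetic worth spelling out: ndesc_get_rx_frame_len() subtracts the two checksum bytes that a type-1 checksum-offload engine appends to the frame (and counts in the reported length), and ndesc_get_timestamp() combines a sub-second word (des2, nanoseconds) with a seconds word (des3) into one 64-bit nanosecond value. Below is a stand-alone sketch of both calculations, not part of the patch; field and macro names are illustrative.

        /* Sketch of the length and timestamp arithmetic used by the normal
         * descriptor helpers above; plain integers stand in for descriptor words. */
        #include <stdint.h>
        #include <stdio.h>

        #define RX_COE_TYPE1 1   /* illustrative stand-in for STMMAC_RX_COE_TYPE1 */

        /* Frame length as reported by RDES0, minus the 2 checksum bytes that a
         * type-1 checksum offload engine appends and counts in the length. */
        static unsigned int rx_frame_len(unsigned int rdes0_frame_len, int rx_coe_type)
        {
                unsigned int csum = (rx_coe_type == RX_COE_TYPE1) ? 2 : 0;

                return rdes0_frame_len - csum;
        }

        /* Timestamp: des2 carries the sub-second part in nanoseconds, des3 the
         * seconds; combine them into one 64-bit nanosecond value. */
        static uint64_t rx_timestamp_ns(uint32_t des2_ns, uint32_t des3_sec)
        {
                return (uint64_t)des2_ns + (uint64_t)des3_sec * 1000000000ULL;
        }

        int main(void)
        {
                printf("len = %u\n", rx_frame_len(66, RX_COE_TYPE1));     /* 64 */
                printf("ts  = %llu ns\n",
                       (unsigned long long)rx_timestamp_ns(500, 3));      /* 3000000500 */
                return 0;
        }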
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "common.h" +#include "descs_com.h" + +static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = le32_to_cpu(p->des0); + unsigned int tdes1 = le32_to_cpu(p->des1); + int ret = tx_done; + + /* Get tx owner first */ + if (unlikely(tdes0 & TDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. */ + if (likely(!(tdes1 & TDES1_LAST_SEGMENT))) + return tx_not_ls; + + if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) { + x->tx_underflow++; + stats->tx_fifo_errors++; + } + if (unlikely(tdes0 & TDES0_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) || + (tdes0 & TDES0_EXCESSIVE_COLLISIONS) || + (tdes0 & TDES0_LATE_COLLISION))) { + unsigned int collisions; + + collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3; + stats->collisions += collisions; + } + ret = tx_err; + } + + if (tdes0 & TDES0_VLAN_FRAME) + x->tx_vlan++; + + if (unlikely(tdes0 & TDES0_DEFERRED)) + x->tx_deferred++; + + return ret; +} + +static int ndesc_get_tx_len(struct dma_desc *p) +{ + return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK); +} + +/* This function verifies if each incoming frame has some errors + * and, if required, updates the multicast statistics. + * In case of success, it returns good_frame because the GMAC device + * is supposed to be able to compute the csum in HW. 
*/ +static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = good_frame; + unsigned int rdes0 = le32_to_cpu(p->des0); + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; + + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) + x->rx_desc++; + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) + x->sa_filter_fail++; + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) + x->overflow_error++; + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) + x->ipc_csum_error++; + if (unlikely(rdes0 & RDES0_COLLISION)) { + x->rx_collision++; + stats->collisions++; + } + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { + x->rx_crc_errors++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_DRIBBLING)) + x->dribbling_bit++; + + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { + x->rx_length++; + ret = discard_frame; + } + if (unlikely(rdes0 & RDES0_MII_ERROR)) { + x->rx_mii++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (rdes0 & RDES0_VLAN_TAG) + x->vlan_tag++; +#endif + return ret; +} + +static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, + int end, int bfsize) +{ + int bfsize1; + + p->des0 |= cpu_to_le32(RDES0_OWN); + + bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK); + + if (mode == STMMAC_CHAIN_MODE) + ndesc_rx_set_on_chain(p, end); + else + ndesc_rx_set_on_ring(p, end, bfsize); + + if (disable_rx_ic) + p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); +} + +static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des0 &= cpu_to_le32(~TDES0_OWN); + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p); + else + ndesc_end_tx_desc_on_ring(p, end); +} + +static int ndesc_get_tx_owner(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31; +} + +static void ndesc_set_tx_owner(struct dma_desc *p) +{ + p->des0 |= cpu_to_le32(TDES0_OWN); +} + +static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) +{ + p->des0 |= cpu_to_le32(RDES0_OWN); +} + +static int ndesc_get_tx_ls(struct dma_desc *p) +{ + return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30; +} + +static void ndesc_release_tx_desc(struct dma_desc *p, int mode) +{ + int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p); + else + ndesc_end_tx_desc_on_ring(p, ter); +} + +static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, + bool ls, unsigned int tot_pkt_len) +{ + unsigned int tdes1 = le32_to_cpu(p->des1); + + if (is_fs) + tdes1 |= TDES1_FIRST_SEGMENT; + else + tdes1 &= ~TDES1_FIRST_SEGMENT; + + if (likely(csum_flag)) + tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT; + else + tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT); + + if (ls) + tdes1 |= TDES1_LAST_SEGMENT; + + p->des1 = cpu_to_le32(tdes1); + + if (mode == STMMAC_CHAIN_MODE) + norm_set_tx_desc_len_on_chain(p, len); + else + norm_set_tx_desc_len_on_ring(p, len); + + if (tx_own) + p->des0 |= cpu_to_le32(TDES0_OWN); +} + +static void ndesc_set_tx_ic(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(TDES1_INTERRUPT); +} + +static int ndesc_get_rx_frame_len(struct dma_desc *p, int 
rx_coe_type) +{ + unsigned int csum = 0; + + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines + */ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + csum = 2; + + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK) + >> RDES0_FRAME_LEN_SHIFT) - + csum); + +} + +static void ndesc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE); +} + +static int ndesc_get_tx_timestamp_status(struct dma_desc *p) +{ + return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17; +} + +static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = le32_to_cpu(p->des2); + /* convert high/sec time stamp value to nanosecond */ + ns += le32_to_cpu(p->des3) * 1000000000ULL; + + *ts = ns; +} + +static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + + if ((le32_to_cpu(p->des2) == 0xffffffff) && + (le32_to_cpu(p->des3) == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; +} + +static void ndesc_display_ring(void *head, unsigned int size, bool rx, + dma_addr_t dma_rx_phy, unsigned int desc_size) +{ + struct dma_desc *p = (struct dma_desc *)head; + dma_addr_t dma_addr; + int i; + + pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + dma_addr = dma_rx_phy + i * sizeof(*p); + + x = *(u64 *)p; + pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x", + i, &dma_addr, + (unsigned int)x, (unsigned int)(x >> 32), + p->des2, p->des3); + p++; + } + pr_info("\n"); +} + +static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + +static void ndesc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + +const struct stmmac_desc_ops ndesc_ops = { + .tx_status = ndesc_get_tx_status, + .rx_status = ndesc_get_rx_status, + .get_tx_len = ndesc_get_tx_len, + .init_rx_desc = ndesc_init_rx_desc, + .init_tx_desc = ndesc_init_tx_desc, + .get_tx_owner = ndesc_get_tx_owner, + .release_tx_desc = ndesc_release_tx_desc, + .prepare_tx_desc = ndesc_prepare_tx_desc, + .set_tx_ic = ndesc_set_tx_ic, + .get_tx_ls = ndesc_get_tx_ls, + .set_tx_owner = ndesc_set_tx_owner, + .set_rx_owner = ndesc_set_rx_owner, + .get_rx_frame_len = ndesc_get_rx_frame_len, + .enable_tx_timestamp = ndesc_enable_tx_timestamp, + .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, + .get_timestamp = ndesc_get_timestamp, + .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, + .display_ring = ndesc_display_ring, + .set_addr = ndesc_set_addr, + .clear = ndesc_clear, +}; diff --git a/devices/stmmac/ring_mode-6.1-ethercat.c b/devices/stmmac/ring_mode-6.1-ethercat.c new file mode 100644 index 00000000..35d93159 --- /dev/null +++ b/devices/stmmac/ring_mode-6.1-ethercat.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Specialised functions for managing Ring mode + + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + descriptors in case of the DMA is configured to work in chained or + in ring mode. 
+ + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include "stmmac-6.1-ethercat.h" + +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; + unsigned int nopaged_len = skb_headlen(skb); + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->cur_tx; + unsigned int bmax, len, des2; + struct dma_desc *desc; + + if (priv->extend_desc) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else + desc = tx_q->dma_tx + entry; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; + else + bmax = BUF_SIZE_2KiB; + + len = nopaged_len - bmax; + + if (nopaged_len > BUF_SIZE_8KiB) { + + des2 = dma_map_single(priv->device, skb->data, bmax, + DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, + STMMAC_RING_MODE, 0, false, skb->len); + tx_q->tx_skbuff[entry] = NULL; + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + + if (priv->extend_desc) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else + desc = tx_q->dma_tx + entry; + + des2 = dma_map_single(priv->device, skb->data + bmax, len, + DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = len; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb), + skb->len); + } else { + des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = nopaged_len; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, + STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb), + skb->len); + } + + tx_q->cur_tx = entry; + + return entry; +} + +static unsigned int is_jumbo_frm(int len, int enh_desc) +{ + unsigned int ret = 0; + + if (len >= BUF_SIZE_4KiB) + ret = 1; + + return ret; +} + +static void refill_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_rx_queue *rx_q = priv_ptr; + struct stmmac_priv *priv = rx_q->priv_data; + + /* Fill DES3 in case of RING mode */ + if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); +} + +/* In ring mode we need to fill the desc3 because it is used as buffer */ +static void init_desc3(struct dma_desc *p) +{ + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); +} + +static void clean_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->dirty_tx; + + /* des3 is only used for jumbo frames tx or time stamping */ + if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo || + (tx_q->tx_skbuff_dma[entry].last_segment && + !priv->extend_desc && priv->hwts_tx_en))) + p->des3 = 0; +} + +static int set_16kib_bfsize(int mtu) +{ + int ret = 0; + 
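The jumbo path in jumbo_frm() above splits a linear socket buffer purely by size: when the headlen exceeds BUF_SIZE_8KiB the first descriptor carries bmax bytes and a second descriptor carries the remainder, otherwise a single descriptor carries the whole headlen; in every branch des3 is set BUF_SIZE_4KiB past des2. A small stand-alone model of just that splitting decision follows, not part of the patch; the buffer-size constants mirror the driver's usual BUF_SIZE_* values and everything else is illustrative. DMA mapping and descriptor programming are deliberately left out.

        /* Sketch of the ring-mode jumbo split decision used by jumbo_frm() above. */
        #include <stdio.h>

        #define BUF_SIZE_8KiB 8188   /* per-descriptor limit, enhanced descriptors */
        #define BUF_SIZE_2KiB 2048   /* per-descriptor limit, normal descriptors */

        struct jumbo_split {
                unsigned int seg1;   /* bytes carried by the first descriptor */
                unsigned int seg2;   /* bytes carried by a second descriptor (0 = unused) */
        };

        static struct jumbo_split split_jumbo(unsigned int headlen, int enh_desc)
        {
                unsigned int bmax = enh_desc ? BUF_SIZE_8KiB : BUF_SIZE_2KiB;
                struct jumbo_split s = { .seg1 = headlen, .seg2 = 0 };

                /* Mirrors the code above: only headlen > BUF_SIZE_8KiB uses two
                 * descriptors; anything smaller goes out in one. */
                if (headlen > BUF_SIZE_8KiB) {
                        s.seg1 = bmax;
                        s.seg2 = headlen - bmax;
                }
                return s;
        }

        int main(void)
        {
                struct jumbo_split s = split_jumbo(9000, 1);

                printf("seg1 = %u, seg2 = %u\n", s.seg1, s.seg2);  /* 8188, 812 */
                return 0;
        }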
if (unlikely(mtu > BUF_SIZE_8KiB)) + ret = BUF_SIZE_16KiB; + return ret; +} + +const struct stmmac_mode_ops ring_mode_ops = { + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .init_desc3 = init_desc3, + .clean_desc3 = clean_desc3, + .set_16kib_bfsize = set_16kib_bfsize, +}; diff --git a/devices/stmmac/ring_mode-6.1-orig.c b/devices/stmmac/ring_mode-6.1-orig.c new file mode 100644 index 00000000..2b5b17d8 --- /dev/null +++ b/devices/stmmac/ring_mode-6.1-orig.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Specialised functions for managing Ring mode + + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + descriptors in case of the DMA is configured to work in chained or + in ring mode. + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include "stmmac.h" + +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; + unsigned int nopaged_len = skb_headlen(skb); + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->cur_tx; + unsigned int bmax, len, des2; + struct dma_desc *desc; + + if (priv->extend_desc) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else + desc = tx_q->dma_tx + entry; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; + else + bmax = BUF_SIZE_2KiB; + + len = nopaged_len - bmax; + + if (nopaged_len > BUF_SIZE_8KiB) { + + des2 = dma_map_single(priv->device, skb->data, bmax, + DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, + STMMAC_RING_MODE, 0, false, skb->len); + tx_q->tx_skbuff[entry] = NULL; + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + + if (priv->extend_desc) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else + desc = tx_q->dma_tx + entry; + + des2 = dma_map_single(priv->device, skb->data + bmax, len, + DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = len; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb), + skb->len); + } else { + des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + desc->des2 = cpu_to_le32(des2); + if (dma_mapping_error(priv->device, des2)) + return -1; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = nopaged_len; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, + STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb), + skb->len); + } + + tx_q->cur_tx = entry; + + return entry; +} + +static unsigned int is_jumbo_frm(int len, int enh_desc) +{ + unsigned int ret = 0; + + if (len >= BUF_SIZE_4KiB) + ret = 1; + + return ret; +} + +static void refill_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_rx_queue *rx_q = priv_ptr; + struct stmmac_priv 
*priv = rx_q->priv_data; + + /* Fill DES3 in case of RING mode */ + if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); +} + +/* In ring mode we need to fill the desc3 because it is used as buffer */ +static void init_desc3(struct dma_desc *p) +{ + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); +} + +static void clean_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->dirty_tx; + + /* des3 is only used for jumbo frames tx or time stamping */ + if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo || + (tx_q->tx_skbuff_dma[entry].last_segment && + !priv->extend_desc && priv->hwts_tx_en))) + p->des3 = 0; +} + +static int set_16kib_bfsize(int mtu) +{ + int ret = 0; + if (unlikely(mtu > BUF_SIZE_8KiB)) + ret = BUF_SIZE_16KiB; + return ret; +} + +const struct stmmac_mode_ops ring_mode_ops = { + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .init_desc3 = init_desc3, + .clean_desc3 = clean_desc3, + .set_16kib_bfsize = set_16kib_bfsize, +}; diff --git a/devices/stmmac/stmmac-6.1-ethercat.h b/devices/stmmac/stmmac-6.1-ethercat.h new file mode 100644 index 00000000..2a161788 --- /dev/null +++ b/devices/stmmac/stmmac-6.1-ethercat.h @@ -0,0 +1,425 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __STMMAC_H__ +#define __STMMAC_H__ + +#define STMMAC_RESOURCE_NAME "ec_stmmaceth" + +#include +#include +#include +#include +#include +#include +#include "common-6.1-ethercat.h" +#include +#include +#include +#include +#include +#include + +/* EtherCAT header file */ +#include "../ecdev.h" + +int __cold stmmac_init(void); +void __cold stmmac_exit(void); + +struct stmmac_resources { + void __iomem *addr; + u8 mac[ETH_ALEN]; + int wol_irq; + int lpi_irq; + int irq; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; +}; + +enum stmmac_txbuf_type { + STMMAC_TXBUF_T_SKB, + STMMAC_TXBUF_T_XDP_TX, + STMMAC_TXBUF_T_XDP_NDO, + STMMAC_TXBUF_T_XSK_TX, +}; + +struct stmmac_tx_info { + dma_addr_t buf; + bool map_as_page; + unsigned len; + bool last_segment; + bool is_jumbo; + enum stmmac_txbuf_type buf_type; +}; + +#define STMMAC_TBS_AVAIL BIT(0) +#define STMMAC_TBS_EN BIT(1) + +/* Frequently used values are kept adjacent for cache effect */ +struct stmmac_tx_queue { + u32 tx_count_frames; + int tbs; + struct hrtimer txtimer; + u32 queue_index; + struct stmmac_priv *priv_data; + struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; + struct dma_edesc *dma_entx; + struct dma_desc *dma_tx; + union { + struct sk_buff **tx_skbuff; + struct xdp_frame **xdpf; + }; + struct stmmac_tx_info *tx_skbuff_dma; + struct xsk_buff_pool *xsk_pool; + u32 xsk_frames_done; + unsigned int cur_tx; + unsigned int dirty_tx; + dma_addr_t dma_tx_phy; + dma_addr_t tx_tail_addr; + u32 mss; +}; + +struct stmmac_rx_buffer { + union { + struct { + struct page *page; + dma_addr_t addr; + __u32 page_offset; + }; + struct xdp_buff *xdp; + }; + struct page *sec_page; + dma_addr_t sec_addr; +}; + +struct stmmac_rx_queue { + u32 rx_count_frames; + u32 queue_index; + struct xdp_rxq_info xdp_rxq; + 
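In the queue structures above, cur_tx/cur_rx and dirty_tx/dirty_rx track the producer and cleanup positions of each descriptor ring, an assumption based on how the rest of this code uses them (jumbo_frm() advances cur_tx, clean_desc3() indexes by dirty_tx); the difference between the two, modulo the ring size, is roughly the number of descriptors in flight. The following tiny model of that bookkeeping is not part of the patch; the helper names are made up, and the power-of-two wrap is assumed to match the driver's own ring macro.

        /* Sketch of cur/dirty ring-index bookkeeping, assuming a power-of-two
         * ring size and wrap.  All names here are illustrative. */
        #include <stdio.h>

        #define RING_SIZE 512u   /* must be a power of two for this wrap to work */

        static unsigned int ring_next(unsigned int idx)
        {
                return (idx + 1) & (RING_SIZE - 1);
        }

        /* Descriptors produced (cur) but not yet reclaimed (dirty). */
        static unsigned int ring_in_flight(unsigned int cur, unsigned int dirty)
        {
                return (cur - dirty) & (RING_SIZE - 1);
        }

        int main(void)
        {
                unsigned int cur = 0, dirty = 0;

                for (int i = 0; i < 5; i++)   /* queue five descriptors */
                        cur = ring_next(cur);
                dirty = ring_next(dirty);     /* reclaim one completed descriptor */

                printf("in flight: %u\n", ring_in_flight(cur, dirty));  /* 4 */
                return 0;
        }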
struct xsk_buff_pool *xsk_pool; + struct page_pool *page_pool; + struct stmmac_rx_buffer *buf_pool; + struct stmmac_priv *priv_data; + struct dma_extended_desc *dma_erx; + struct dma_desc *dma_rx ____cacheline_aligned_in_smp; + unsigned int cur_rx; + unsigned int dirty_rx; + unsigned int buf_alloc_num; + u32 rx_zeroc_thresh; + dma_addr_t dma_rx_phy; + u32 rx_tail_addr; + unsigned int state_saved; + struct { + struct sk_buff *skb; + unsigned int len; + unsigned int error; + } state; +}; + +struct stmmac_channel { + struct napi_struct rx_napi ____cacheline_aligned_in_smp; + struct napi_struct tx_napi ____cacheline_aligned_in_smp; + struct napi_struct rxtx_napi ____cacheline_aligned_in_smp; + struct stmmac_priv *priv_data; + spinlock_t lock; + u32 index; +}; + +struct stmmac_tc_entry { + bool in_use; + bool in_hw; + bool is_last; + bool is_frag; + void *frag_ptr; + unsigned int table_pos; + u32 handle; + u32 prio; + struct { + u32 match_data; + u32 match_en; + u8 af:1; + u8 rf:1; + u8 im:1; + u8 nc:1; + u8 res1:4; + u8 frame_offset; + u8 ok_index; + u8 dma_ch_no; + u32 res2; + } __packed val; +}; + +#define STMMAC_PPS_MAX 4 +struct stmmac_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + +struct stmmac_rss { + int enable; + u8 key[STMMAC_RSS_HASH_KEY_SIZE]; + u32 table[STMMAC_RSS_MAX_TABLE_SIZE]; +}; + +#define STMMAC_FLOW_ACTION_DROP BIT(0) +struct stmmac_flow_entry { + unsigned long cookie; + unsigned long action; + u8 ip_proto; + int in_use; + int idx; + int is_l4; +}; + +/* Rx Frame Steering */ +enum stmmac_rfs_type { + STMMAC_RFS_T_VLAN, + STMMAC_RFS_T_LLDP, + STMMAC_RFS_T_1588, + STMMAC_RFS_T_MAX, +}; + +struct stmmac_rfs_entry { + unsigned long cookie; + u16 etype; + int in_use; + int type; + int tc; +}; + +struct stmmac_dma_conf { + unsigned int dma_buf_sz; + + /* RX Queue */ + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; + unsigned int dma_rx_size; + + /* TX Queue */ + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; + unsigned int dma_tx_size; +}; + +struct stmmac_priv { + /* Frequently used values are kept adjacent for cache effect */ + u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; + u32 tx_coal_timer[MTL_MAX_TX_QUEUES]; + u32 rx_coal_frames[MTL_MAX_TX_QUEUES]; + + int hwts_tx_en; + bool tx_path_in_lpi_mode; + bool tso; + int sph; + int sph_cap; + u32 sarc_type; + + unsigned int rx_copybreak; + u32 rx_riwt[MTL_MAX_TX_QUEUES]; + int hwts_rx_en; + + void __iomem *ioaddr; + struct net_device *dev; + struct device *device; + struct mac_device_info *hw; + int (*hwif_quirks)(struct stmmac_priv *priv); + struct mutex lock; + + struct stmmac_dma_conf dma_conf; + + /* Generic channel for NAPI */ + struct stmmac_channel channel[STMMAC_CH_MAX]; + + int speed; + unsigned int flow_ctrl; + unsigned int pause; + struct mii_bus *mii; + + struct phylink_config phylink_config; + struct phylink *phylink; + + struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp; + struct stmmac_safety_stats sstats; + struct plat_stmmacenet_data *plat; + struct dma_features dma_cap; + struct stmmac_counters mmc; + int hw_cap_support; + int synopsys_id; + u32 msg_enable; + int wolopts; + int wol_irq; + bool wol_irq_disabled; + int clk_csr; + struct timer_list eee_ctrl_timer; + int lpi_irq; + int eee_enabled; + int eee_active; + int tx_lpi_timer; + int tx_lpi_enabled; + int eee_tw_timer; + bool eee_sw_timer_en; + unsigned int mode; + unsigned int chain_mode; + int extend_desc; + struct hwtstamp_config tstamp_config; + struct ptp_clock *ptp_clock; + struct ptp_clock_info 
ptp_clock_ops; + unsigned int default_addend; + u32 sub_second_inc; + u32 systime_flags; + u32 adv_ts; + int use_riwt; + int irq_wake; + rwlock_t ptp_lock; + /* Protects auxiliary snapshot registers from concurrent access. */ + struct mutex aux_ts_lock; + wait_queue_head_t tstamp_busy_wait; + + void __iomem *mmcaddr; + void __iomem *ptpaddr; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; + /*irq name */ + char int_name_mac[IFNAMSIZ + 9]; + char int_name_wol[IFNAMSIZ + 9]; + char int_name_lpi[IFNAMSIZ + 9]; + char int_name_sfty_ce[IFNAMSIZ + 10]; + char int_name_sfty_ue[IFNAMSIZ + 10]; + char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14]; + char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18]; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; +#endif + + unsigned long state; + struct workqueue_struct *wq; + struct work_struct service_task; + + /* Workqueue for handling FPE hand-shaking */ + unsigned long fpe_task_state; + struct workqueue_struct *fpe_wq; + struct work_struct fpe_task; + char wq_name[IFNAMSIZ + 4]; + + /* TC Handling */ + unsigned int tc_entries_max; + unsigned int tc_off_max; + struct stmmac_tc_entry *tc_entries; + unsigned int flow_entries_max; + struct stmmac_flow_entry *flow_entries; + unsigned int rfs_entries_max[STMMAC_RFS_T_MAX]; + unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX]; + unsigned int rfs_entries_total; + struct stmmac_rfs_entry *rfs_entries; + + /* Pulse Per Second output */ + struct stmmac_pps_cfg pps[STMMAC_PPS_MAX]; + + /* Receive Side Scaling */ + struct stmmac_rss rss; + + /* XDP BPF Program */ + unsigned long *af_xdp_zc_qps; + struct bpf_prog *xdp_prog; + + /* EtherCAT device variables */ + ec_device_t *ecdev_; + unsigned long ec_watchdog_jiffies; + struct irq_work ec_watchdog_kicker; + bool ecdev_initialized; +}; + +static inline ec_device_t *get_ecdev(struct stmmac_priv *adapter) +{ +#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING + WARN_ON(!adapter->ecdev_initialized); +#endif + return adapter->ecdev_; +} + +enum stmmac_state { + STMMAC_DOWN, + STMMAC_RESET_REQUESTED, + STMMAC_RESETING, + STMMAC_SERVICE_SCHED, +}; + +int stmmac_mdio_unregister(struct net_device *ndev); +int stmmac_mdio_register(struct net_device *ndev); +int stmmac_mdio_reset(struct mii_bus *mii); +int stmmac_xpcs_setup(struct mii_bus *mii); +void stmmac_set_ethtool_ops(struct net_device *netdev); + +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); +void stmmac_ptp_register(struct stmmac_priv *priv); +void stmmac_ptp_unregister(struct stmmac_priv *priv); +int stmmac_xdp_open(struct net_device *dev); +void stmmac_xdp_release(struct net_device *dev); +int stmmac_resume(struct device *dev); +int stmmac_suspend(struct device *dev); +int stmmac_ec_dvr_remove(struct device *dev); +int stmmac_ec_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *res); +void stmmac_disable_eee_mode(struct stmmac_priv *priv); +bool stmmac_eee_init(struct stmmac_priv *priv); +int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); +int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); +int ec_stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable); + +static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) +{ + return !!priv->xdp_prog; +} + +static inline unsigned int 
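The EtherCAT additions to stmmac_priv above route every read of the ecdev handle through get_ecdev(), which warns if the handle is used before the driver has marked it initialized, but only when EC_ENABLE_DRIVER_RESOURCE_VERIFYING is defined. A stand-alone sketch of that guard pattern (the adapter struct and the assert-based check are simplified placeholders, not the driver's kernel types):

/* Sketch only: the "verify before use" accessor pattern used for ecdev_. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct adapter {
	void *ecdev_;            /* handle owned by the EtherCAT master */
	bool  ecdev_initialized; /* set once the handle has been set up */
};

static void *get_ecdev(struct adapter *a)
{
#ifdef EC_ENABLE_DRIVER_RESOURCE_VERIFYING
	assert(a->ecdev_initialized); /* the kernel code uses WARN_ON() here */
#endif
	return a->ecdev_;
}

int main(void)
{
	struct adapter a = { .ecdev_ = NULL, .ecdev_initialized = false };

	/* Without the verifying option this silently returns NULL;
	 * with it, the accessor flags the out-of-order access. */
	printf("ecdev=%p\n", get_ecdev(&a));
	return 0;
}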
stmmac_rx_offset(struct stmmac_priv *priv) +{ + if (stmmac_xdp_is_enabled(priv)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time); + +#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) +void stmmac_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf); +void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data); +int stmmac_selftest_get_count(struct stmmac_priv *priv); +#else +static inline void stmmac_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + /* Not enabled */ +} +static inline void stmmac_selftest_get_strings(struct stmmac_priv *priv, + u8 *data) +{ + /* Not enabled */ +} +static inline int stmmac_selftest_get_count(struct stmmac_priv *priv) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_STMMAC_SELFTESTS */ + +#endif /* __STMMAC_H__ */ diff --git a/devices/stmmac/stmmac-6.1-orig.h b/devices/stmmac/stmmac-6.1-orig.h new file mode 100644 index 00000000..46944c02 --- /dev/null +++ b/devices/stmmac/stmmac-6.1-orig.h @@ -0,0 +1,404 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __STMMAC_H__ +#define __STMMAC_H__ + +#define STMMAC_RESOURCE_NAME "stmmaceth" + +#include +#include +#include +#include +#include +#include +#include "common.h" +#include +#include +#include +#include +#include + +struct stmmac_resources { + void __iomem *addr; + u8 mac[ETH_ALEN]; + int wol_irq; + int lpi_irq; + int irq; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; +}; + +enum stmmac_txbuf_type { + STMMAC_TXBUF_T_SKB, + STMMAC_TXBUF_T_XDP_TX, + STMMAC_TXBUF_T_XDP_NDO, + STMMAC_TXBUF_T_XSK_TX, +}; + +struct stmmac_tx_info { + dma_addr_t buf; + bool map_as_page; + unsigned len; + bool last_segment; + bool is_jumbo; + enum stmmac_txbuf_type buf_type; +}; + +#define STMMAC_TBS_AVAIL BIT(0) +#define STMMAC_TBS_EN BIT(1) + +/* Frequently used values are kept adjacent for cache effect */ +struct stmmac_tx_queue { + u32 tx_count_frames; + int tbs; + struct hrtimer txtimer; + u32 queue_index; + struct stmmac_priv *priv_data; + struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; + struct dma_edesc *dma_entx; + struct dma_desc *dma_tx; + union { + struct sk_buff **tx_skbuff; + struct xdp_frame **xdpf; + }; + struct stmmac_tx_info *tx_skbuff_dma; + struct xsk_buff_pool *xsk_pool; + u32 xsk_frames_done; + unsigned int cur_tx; + unsigned int dirty_tx; + dma_addr_t dma_tx_phy; + dma_addr_t tx_tail_addr; + u32 mss; +}; + +struct stmmac_rx_buffer { + union { + struct { + struct page *page; + dma_addr_t addr; + __u32 page_offset; + }; + struct xdp_buff *xdp; + }; + struct page *sec_page; + dma_addr_t sec_addr; +}; + +struct stmmac_rx_queue { + u32 rx_count_frames; + u32 queue_index; + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; + struct page_pool *page_pool; + struct stmmac_rx_buffer *buf_pool; 
+ struct stmmac_priv *priv_data; + struct dma_extended_desc *dma_erx; + struct dma_desc *dma_rx ____cacheline_aligned_in_smp; + unsigned int cur_rx; + unsigned int dirty_rx; + unsigned int buf_alloc_num; + u32 rx_zeroc_thresh; + dma_addr_t dma_rx_phy; + u32 rx_tail_addr; + unsigned int state_saved; + struct { + struct sk_buff *skb; + unsigned int len; + unsigned int error; + } state; +}; + +struct stmmac_channel { + struct napi_struct rx_napi ____cacheline_aligned_in_smp; + struct napi_struct tx_napi ____cacheline_aligned_in_smp; + struct napi_struct rxtx_napi ____cacheline_aligned_in_smp; + struct stmmac_priv *priv_data; + spinlock_t lock; + u32 index; +}; + +struct stmmac_tc_entry { + bool in_use; + bool in_hw; + bool is_last; + bool is_frag; + void *frag_ptr; + unsigned int table_pos; + u32 handle; + u32 prio; + struct { + u32 match_data; + u32 match_en; + u8 af:1; + u8 rf:1; + u8 im:1; + u8 nc:1; + u8 res1:4; + u8 frame_offset; + u8 ok_index; + u8 dma_ch_no; + u32 res2; + } __packed val; +}; + +#define STMMAC_PPS_MAX 4 +struct stmmac_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + +struct stmmac_rss { + int enable; + u8 key[STMMAC_RSS_HASH_KEY_SIZE]; + u32 table[STMMAC_RSS_MAX_TABLE_SIZE]; +}; + +#define STMMAC_FLOW_ACTION_DROP BIT(0) +struct stmmac_flow_entry { + unsigned long cookie; + unsigned long action; + u8 ip_proto; + int in_use; + int idx; + int is_l4; +}; + +/* Rx Frame Steering */ +enum stmmac_rfs_type { + STMMAC_RFS_T_VLAN, + STMMAC_RFS_T_LLDP, + STMMAC_RFS_T_1588, + STMMAC_RFS_T_MAX, +}; + +struct stmmac_rfs_entry { + unsigned long cookie; + u16 etype; + int in_use; + int type; + int tc; +}; + +struct stmmac_dma_conf { + unsigned int dma_buf_sz; + + /* RX Queue */ + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; + unsigned int dma_rx_size; + + /* TX Queue */ + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; + unsigned int dma_tx_size; +}; + +struct stmmac_priv { + /* Frequently used values are kept adjacent for cache effect */ + u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; + u32 tx_coal_timer[MTL_MAX_TX_QUEUES]; + u32 rx_coal_frames[MTL_MAX_TX_QUEUES]; + + int hwts_tx_en; + bool tx_path_in_lpi_mode; + bool tso; + int sph; + int sph_cap; + u32 sarc_type; + + unsigned int rx_copybreak; + u32 rx_riwt[MTL_MAX_TX_QUEUES]; + int hwts_rx_en; + + void __iomem *ioaddr; + struct net_device *dev; + struct device *device; + struct mac_device_info *hw; + int (*hwif_quirks)(struct stmmac_priv *priv); + struct mutex lock; + + struct stmmac_dma_conf dma_conf; + + /* Generic channel for NAPI */ + struct stmmac_channel channel[STMMAC_CH_MAX]; + + int speed; + unsigned int flow_ctrl; + unsigned int pause; + struct mii_bus *mii; + + struct phylink_config phylink_config; + struct phylink *phylink; + + struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp; + struct stmmac_safety_stats sstats; + struct plat_stmmacenet_data *plat; + struct dma_features dma_cap; + struct stmmac_counters mmc; + int hw_cap_support; + int synopsys_id; + u32 msg_enable; + int wolopts; + int wol_irq; + bool wol_irq_disabled; + int clk_csr; + struct timer_list eee_ctrl_timer; + int lpi_irq; + int eee_enabled; + int eee_active; + int tx_lpi_timer; + int tx_lpi_enabled; + int eee_tw_timer; + bool eee_sw_timer_en; + unsigned int mode; + unsigned int chain_mode; + int extend_desc; + struct hwtstamp_config tstamp_config; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_ops; + unsigned int default_addend; + u32 sub_second_inc; + u32 systime_flags; + u32 
adv_ts; + int use_riwt; + int irq_wake; + rwlock_t ptp_lock; + /* Protects auxiliary snapshot registers from concurrent access. */ + struct mutex aux_ts_lock; + wait_queue_head_t tstamp_busy_wait; + + void __iomem *mmcaddr; + void __iomem *ptpaddr; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; + /*irq name */ + char int_name_mac[IFNAMSIZ + 9]; + char int_name_wol[IFNAMSIZ + 9]; + char int_name_lpi[IFNAMSIZ + 9]; + char int_name_sfty_ce[IFNAMSIZ + 10]; + char int_name_sfty_ue[IFNAMSIZ + 10]; + char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14]; + char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18]; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; +#endif + + unsigned long state; + struct workqueue_struct *wq; + struct work_struct service_task; + + /* Workqueue for handling FPE hand-shaking */ + unsigned long fpe_task_state; + struct workqueue_struct *fpe_wq; + struct work_struct fpe_task; + char wq_name[IFNAMSIZ + 4]; + + /* TC Handling */ + unsigned int tc_entries_max; + unsigned int tc_off_max; + struct stmmac_tc_entry *tc_entries; + unsigned int flow_entries_max; + struct stmmac_flow_entry *flow_entries; + unsigned int rfs_entries_max[STMMAC_RFS_T_MAX]; + unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX]; + unsigned int rfs_entries_total; + struct stmmac_rfs_entry *rfs_entries; + + /* Pulse Per Second output */ + struct stmmac_pps_cfg pps[STMMAC_PPS_MAX]; + + /* Receive Side Scaling */ + struct stmmac_rss rss; + + /* XDP BPF Program */ + unsigned long *af_xdp_zc_qps; + struct bpf_prog *xdp_prog; +}; + +enum stmmac_state { + STMMAC_DOWN, + STMMAC_RESET_REQUESTED, + STMMAC_RESETING, + STMMAC_SERVICE_SCHED, +}; + +int stmmac_mdio_unregister(struct net_device *ndev); +int stmmac_mdio_register(struct net_device *ndev); +int stmmac_mdio_reset(struct mii_bus *mii); +int stmmac_xpcs_setup(struct mii_bus *mii); +void stmmac_set_ethtool_ops(struct net_device *netdev); + +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); +void stmmac_ptp_register(struct stmmac_priv *priv); +void stmmac_ptp_unregister(struct stmmac_priv *priv); +int stmmac_xdp_open(struct net_device *dev); +void stmmac_xdp_release(struct net_device *dev); +int stmmac_resume(struct device *dev); +int stmmac_suspend(struct device *dev); +int stmmac_dvr_remove(struct device *dev); +int stmmac_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *res); +void stmmac_disable_eee_mode(struct stmmac_priv *priv); +bool stmmac_eee_init(struct stmmac_priv *priv); +int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); +int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable); + +static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) +{ + return !!priv->xdp_prog; +} + +static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) +{ + if (stmmac_xdp_is_enabled(priv)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); +struct 
timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time); + +#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) +void stmmac_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf); +void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data); +int stmmac_selftest_get_count(struct stmmac_priv *priv); +#else +static inline void stmmac_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + /* Not enabled */ +} +static inline void stmmac_selftest_get_strings(struct stmmac_priv *priv, + u8 *data) +{ + /* Not enabled */ +} +static inline int stmmac_selftest_get_count(struct stmmac_priv *priv) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_STMMAC_SELFTESTS */ + +#endif /* __STMMAC_H__ */ diff --git a/devices/stmmac/stmmac_ethtool-6.1-ethercat.c b/devices/stmmac/stmmac_ethtool-6.1-ethercat.c new file mode 100644 index 00000000..8899c195 --- /dev/null +++ b/devices/stmmac/stmmac_ethtool-6.1-ethercat.c @@ -0,0 +1,1202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + STMMAC Ethtool support + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac-6.1-ethercat.h" +#include "dwmac_dma-6.1-ethercat.h" +#include "dwxgmac2-6.1-ethercat.h" + +#define REG_SPACE_SIZE 0x1060 +#define GMAC4_REG_SPACE_SIZE 0x116C +#define MAC100_ETHTOOL_NAME "st_mac100" +#define GMAC_ETHTOOL_NAME "st_gmac" +#define XGMAC_ETHTOOL_NAME "st_xgmac" + +/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h + * + * It is here because dwmac_dma.h and dwmac4_dam.h can not be included at the + * same time due to the conflicting macro names. 
+ */ +#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100 + +#define ETHTOOL_DMA_OFFSET 55 + +struct stmmac_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define STMMAC_STAT(m) \ + { #m, sizeof_field(struct stmmac_extra_stats, m), \ + offsetof(struct stmmac_priv, xstats.m)} + +static const struct stmmac_stats stmmac_gstrings_stats[] = { + /* Transmit errors */ + STMMAC_STAT(tx_underflow), + STMMAC_STAT(tx_carrier), + STMMAC_STAT(tx_losscarrier), + STMMAC_STAT(vlan_tag), + STMMAC_STAT(tx_deferred), + STMMAC_STAT(tx_vlan), + STMMAC_STAT(tx_jabber), + STMMAC_STAT(tx_frame_flushed), + STMMAC_STAT(tx_payload_error), + STMMAC_STAT(tx_ip_header_error), + /* Receive errors */ + STMMAC_STAT(rx_desc), + STMMAC_STAT(sa_filter_fail), + STMMAC_STAT(overflow_error), + STMMAC_STAT(ipc_csum_error), + STMMAC_STAT(rx_collision), + STMMAC_STAT(rx_crc_errors), + STMMAC_STAT(dribbling_bit), + STMMAC_STAT(rx_length), + STMMAC_STAT(rx_mii), + STMMAC_STAT(rx_multicast), + STMMAC_STAT(rx_gmac_overflow), + STMMAC_STAT(rx_watchdog), + STMMAC_STAT(da_rx_filter_fail), + STMMAC_STAT(sa_rx_filter_fail), + STMMAC_STAT(rx_missed_cntr), + STMMAC_STAT(rx_overflow_cntr), + STMMAC_STAT(rx_vlan), + STMMAC_STAT(rx_split_hdr_pkt_n), + /* Tx/Rx IRQ error info */ + STMMAC_STAT(tx_undeflow_irq), + STMMAC_STAT(tx_process_stopped_irq), + STMMAC_STAT(tx_jabber_irq), + STMMAC_STAT(rx_overflow_irq), + STMMAC_STAT(rx_buf_unav_irq), + STMMAC_STAT(rx_process_stopped_irq), + STMMAC_STAT(rx_watchdog_irq), + STMMAC_STAT(tx_early_irq), + STMMAC_STAT(fatal_bus_error_irq), + /* Tx/Rx IRQ Events */ + STMMAC_STAT(rx_early_irq), + STMMAC_STAT(threshold), + STMMAC_STAT(tx_pkt_n), + STMMAC_STAT(rx_pkt_n), + STMMAC_STAT(normal_irq_n), + STMMAC_STAT(rx_normal_irq_n), + STMMAC_STAT(napi_poll), + STMMAC_STAT(tx_normal_irq_n), + STMMAC_STAT(tx_clean), + STMMAC_STAT(tx_set_ic_bit), + STMMAC_STAT(irq_receive_pmt_irq_n), + /* MMC info */ + STMMAC_STAT(mmc_tx_irq_n), + STMMAC_STAT(mmc_rx_irq_n), + STMMAC_STAT(mmc_rx_csum_offload_irq_n), + /* EEE */ + STMMAC_STAT(irq_tx_path_in_lpi_mode_n), + STMMAC_STAT(irq_tx_path_exit_lpi_mode_n), + STMMAC_STAT(irq_rx_path_in_lpi_mode_n), + STMMAC_STAT(irq_rx_path_exit_lpi_mode_n), + STMMAC_STAT(phy_eee_wakeup_error_n), + /* Extended RDES status */ + STMMAC_STAT(ip_hdr_err), + STMMAC_STAT(ip_payload_err), + STMMAC_STAT(ip_csum_bypassed), + STMMAC_STAT(ipv4_pkt_rcvd), + STMMAC_STAT(ipv6_pkt_rcvd), + STMMAC_STAT(no_ptp_rx_msg_type_ext), + STMMAC_STAT(ptp_rx_msg_type_sync), + STMMAC_STAT(ptp_rx_msg_type_follow_up), + STMMAC_STAT(ptp_rx_msg_type_delay_req), + STMMAC_STAT(ptp_rx_msg_type_delay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_req), + STMMAC_STAT(ptp_rx_msg_type_pdelay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up), + STMMAC_STAT(ptp_rx_msg_type_announce), + STMMAC_STAT(ptp_rx_msg_type_management), + STMMAC_STAT(ptp_rx_msg_pkt_reserved_type), + STMMAC_STAT(ptp_frame_type), + STMMAC_STAT(ptp_ver), + STMMAC_STAT(timestamp_dropped), + STMMAC_STAT(av_pkt_rcvd), + STMMAC_STAT(av_tagged_pkt_rcvd), + STMMAC_STAT(vlan_tag_priority_val), + STMMAC_STAT(l3_filter_match), + STMMAC_STAT(l4_filter_match), + STMMAC_STAT(l3_l4_filter_no_match), + /* PCS */ + STMMAC_STAT(irq_pcs_ane_n), + STMMAC_STAT(irq_pcs_link_n), + STMMAC_STAT(irq_rgmii_n), + /* DEBUG */ + STMMAC_STAT(mtl_tx_status_fifo_full), + STMMAC_STAT(mtl_tx_fifo_not_empty), + STMMAC_STAT(mmtl_fifo_ctrl), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_write), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_read), 
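Each STMMAC_STAT() entry above pairs a counter name with its size and its byte offset inside stmmac_priv, so the ethtool dump loop can read every counter generically from one table. A self-contained sketch of that offsetof-based technique (the stats struct and names here are invented for illustration, not the driver's):

/* Sketch only: a generic stats table built from offsetof(), as the ethtool code does. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	unsigned long tx_underflow;
	unsigned long rx_crc_errors;
	uint64_t      rx_bytes;
};

struct stat_desc {
	const char *name;
	size_t      size;
	size_t      offset;
};

#define DEMO_STAT(m) { #m, sizeof(((struct demo_stats *)0)->m), \
		       offsetof(struct demo_stats, m) }

static const struct stat_desc demo_table[] = {
	DEMO_STAT(tx_underflow),
	DEMO_STAT(rx_crc_errors),
	DEMO_STAT(rx_bytes),
};

int main(void)
{
	struct demo_stats s = { .tx_underflow = 3, .rx_crc_errors = 7, .rx_bytes = 123456 };
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
		const char *p = (const char *)&s + demo_table[i].offset;
		uint64_t val = (demo_table[i].size == sizeof(uint64_t))
			       ? *(const uint64_t *)p : *(const unsigned long *)p;

		printf("%s = %llu\n", demo_table[i].name, (unsigned long long)val);
	}
	return 0;
}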
+ STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle), + STMMAC_STAT(mac_tx_in_pause), + STMMAC_STAT(mac_tx_frame_ctrl_xfer), + STMMAC_STAT(mac_tx_frame_ctrl_idle), + STMMAC_STAT(mac_tx_frame_ctrl_wait), + STMMAC_STAT(mac_tx_frame_ctrl_pause), + STMMAC_STAT(mac_gmii_tx_proto_engine), + STMMAC_STAT(mtl_rx_fifo_fill_level_full), + STMMAC_STAT(mtl_rx_fifo_fill_above_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_below_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_level_empty), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_status), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle), + STMMAC_STAT(mtl_rx_fifo_ctrl_active), + STMMAC_STAT(mac_rx_frame_ctrl_fifo), + STMMAC_STAT(mac_gmii_rx_proto_engine), + /* TSO */ + STMMAC_STAT(tx_tso_frames), + STMMAC_STAT(tx_tso_nfrags), + /* EST */ + STMMAC_STAT(mtl_est_cgce), + STMMAC_STAT(mtl_est_hlbs), + STMMAC_STAT(mtl_est_hlbf), + STMMAC_STAT(mtl_est_btre), + STMMAC_STAT(mtl_est_btrlm), +}; +#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) + +/* HW MAC Management counters (if supported) */ +#define STMMAC_MMC_STAT(m) \ + { #m, sizeof_field(struct stmmac_counters, m), \ + offsetof(struct stmmac_priv, mmc.m)} + +static const struct stmmac_stats stmmac_mmc[] = { + STMMAC_MMC_STAT(mmc_tx_octetcount_gb), + STMMAC_MMC_STAT(mmc_tx_framecount_gb), + STMMAC_MMC_STAT(mmc_tx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_tx_multicastframe_g), + STMMAC_MMC_STAT(mmc_tx_64_octets_gb), + STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_tx_unicast_gb), + STMMAC_MMC_STAT(mmc_tx_multicast_gb), + STMMAC_MMC_STAT(mmc_tx_broadcast_gb), + STMMAC_MMC_STAT(mmc_tx_underflow_error), + STMMAC_MMC_STAT(mmc_tx_singlecol_g), + STMMAC_MMC_STAT(mmc_tx_multicol_g), + STMMAC_MMC_STAT(mmc_tx_deferred), + STMMAC_MMC_STAT(mmc_tx_latecol), + STMMAC_MMC_STAT(mmc_tx_exesscol), + STMMAC_MMC_STAT(mmc_tx_carrier_error), + STMMAC_MMC_STAT(mmc_tx_octetcount_g), + STMMAC_MMC_STAT(mmc_tx_framecount_g), + STMMAC_MMC_STAT(mmc_tx_excessdef), + STMMAC_MMC_STAT(mmc_tx_pause_frame), + STMMAC_MMC_STAT(mmc_tx_vlan_frame_g), + STMMAC_MMC_STAT(mmc_rx_framecount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_g), + STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_rx_multicastframe_g), + STMMAC_MMC_STAT(mmc_rx_crc_error), + STMMAC_MMC_STAT(mmc_rx_align_error), + STMMAC_MMC_STAT(mmc_rx_run_error), + STMMAC_MMC_STAT(mmc_rx_jabber_error), + STMMAC_MMC_STAT(mmc_rx_undersize_g), + STMMAC_MMC_STAT(mmc_rx_oversize_g), + STMMAC_MMC_STAT(mmc_rx_64_octets_gb), + STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_rx_unicast_g), + STMMAC_MMC_STAT(mmc_rx_length_error), + STMMAC_MMC_STAT(mmc_rx_autofrangetype), + STMMAC_MMC_STAT(mmc_rx_pause_frames), + STMMAC_MMC_STAT(mmc_rx_fifo_overflow), + STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb), + STMMAC_MMC_STAT(mmc_rx_watchdog_error), + STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask), + STMMAC_MMC_STAT(mmc_rx_ipc_intr), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag), + 
STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr), + STMMAC_MMC_STAT(mmc_rx_ipv6_nopay), + STMMAC_MMC_STAT(mmc_rx_udp_gd), + STMMAC_MMC_STAT(mmc_rx_udp_err), + STMMAC_MMC_STAT(mmc_rx_tcp_gd), + STMMAC_MMC_STAT(mmc_rx_tcp_err), + STMMAC_MMC_STAT(mmc_rx_icmp_gd), + STMMAC_MMC_STAT(mmc_rx_icmp_err), + STMMAC_MMC_STAT(mmc_rx_udp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_udp_err_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_err_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_err_octets), + STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr), + STMMAC_MMC_STAT(mmc_tx_hold_req_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr), + STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr), +}; +#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc) + +static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = { + "tx_pkt_n", + "tx_irq_n", +#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string) +}; + +static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = { + "rx_pkt_n", + "rx_irq_n", +#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string) +}; + +static void stmmac_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->has_gmac || priv->plat->has_gmac4) + strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + else if (priv->plat->has_xgmac) + strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver)); + else + strscpy(info->driver, MAC100_ETHTOOL_NAME, + sizeof(info->driver)); + + if (priv->plat->pdev) { + strscpy(info->bus_info, pci_name(priv->plat->pdev), + sizeof(info->bus_info)); + } +} + +static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { + struct rgmii_adv adv; + u32 supported, advertising, lp_advertising; + + if (!priv->xstats.pcs_link) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + return 0; + } + cmd->base.duplex = priv->xstats.pcs_duplex; + + cmd->base.speed = priv->xstats.pcs_speed; + + /* Get and convert ADV/LP_ADV from the HW AN registers */ + if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv)) + return -EOPNOTSUPP; /* should never happen indeed */ + + /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ + + ethtool_convert_link_mode_to_legacy_u32( + &supported, cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32( + &advertising, cmd->link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32( + &lp_advertising, cmd->link_modes.lp_advertising); + + if (adv.pause & STMMAC_PCS_PAUSE) + advertising |= ADVERTISED_Pause; + if (adv.pause & STMMAC_PCS_ASYM_PAUSE) + advertising |= ADVERTISED_Asym_Pause; + if (adv.lp_pause & STMMAC_PCS_PAUSE) + lp_advertising |= ADVERTISED_Pause; + if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE) + lp_advertising |= ADVERTISED_Asym_Pause; + + /* Reg49[3] 
always set because ANE is always supported */ + cmd->base.autoneg = ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; + lp_advertising |= ADVERTISED_Autoneg; + + if (adv.duplex) { + supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Full); + advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + } else { + supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + } + if (adv.lp_duplex) + lp_advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + else + lp_advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + cmd->base.port = PORT_OTHER; + + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.supported, supported); + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.advertising, advertising); + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.lp_advertising, lp_advertising); + + return 0; + } + + return phylink_ethtool_ksettings_get(priv->phylink, cmd); +} + +static int +stmmac_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { + u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; + + /* Only support ANE */ + if (cmd->base.autoneg != AUTONEG_ENABLE) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + mutex_lock(&priv->lock); + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); + mutex_unlock(&priv->lock); + + return 0; + } + + return phylink_ethtool_ksettings_set(priv->phylink, cmd); +} + +static u32 stmmac_ethtool_getmsglevel(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct stmmac_priv *priv = netdev_priv(dev); + priv->msg_enable = level; + +} + +static int stmmac_check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EBUSY; + return 0; +} + +static int stmmac_ethtool_get_regs_len(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->has_xgmac) + return XGMAC_REGSIZE * 4; + else if (priv->plat->has_gmac4) + return GMAC4_REG_SPACE_SIZE; + return REG_SPACE_SIZE; +} + +static void stmmac_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 *reg_space = (u32 *) space; + + stmmac_dump_mac_regs(priv, priv->hw, reg_space); + stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); + + /* Copy DMA registers to where ethtool expects them */ + if (priv->plat->has_gmac4) { + /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], + NUM_DWMAC4_DMA_REGS * 4); + } else if (!priv->plat->has_xgmac) { + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[DMA_BUS_MODE / 4], + NUM_DWMAC1000_DMA_REGS * 4); + } +} + +static int stmmac_nway_reset(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return 
phylink_ethtool_nway_reset(priv->phylink); +} + +static void stmmac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + ring->rx_max_pending = DMA_MAX_RX_SIZE; + ring->tx_max_pending = DMA_MAX_TX_SIZE; + ring->rx_pending = priv->dma_conf.dma_rx_size; + ring->tx_pending = priv->dma_conf.dma_tx_size; +} + +static int stmmac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + if (ring->rx_mini_pending || ring->rx_jumbo_pending || + ring->rx_pending < DMA_MIN_RX_SIZE || + ring->rx_pending > DMA_MAX_RX_SIZE || + !is_power_of_2(ring->rx_pending) || + ring->tx_pending < DMA_MIN_TX_SIZE || + ring->tx_pending > DMA_MAX_TX_SIZE || + !is_power_of_2(ring->tx_pending)) + return -EINVAL; + + return stmmac_reinit_ringparam(netdev, ring->rx_pending, + ring->tx_pending); +} + +static void +stmmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + struct rgmii_adv adv_lp; + + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { + pause->autoneg = 1; + if (!adv_lp.pause) + return; + } else { + phylink_ethtool_get_pauseparam(priv->phylink, pause); + } +} + +static int +stmmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + struct rgmii_adv adv_lp; + + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { + pause->autoneg = 1; + if (!adv_lp.pause) + return -EOPNOTSUPP; + return 0; + } else { + return phylink_ethtool_set_pauseparam(priv->phylink, pause); + } +} + +static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + char *p; + + for (q = 0; q < tx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.txq_stats[q].tx_pkt_n); + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + *data++ = (*(unsigned long *)p); + p += sizeof(unsigned long); + } + } + for (q = 0; q < rx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.rxq_stats[q].rx_pkt_n); + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + *data++ = (*(unsigned long *)p); + p += sizeof(unsigned long); + } + } +} + +static void stmmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, u64 *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 tx_queues_count = priv->plat->tx_queues_to_use; + unsigned long count; + int i, j = 0, ret; + + if (priv->dma_cap.asp) { + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + if (!stmmac_safety_feat_dump(priv, &priv->sstats, i, + &count, NULL)) + data[j++] = count; + } + } + + /* Update the DMA HW counters for dwmac10/100 */ + ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, + priv->ioaddr); + if (ret) { + /* If supported, for new GMAC chips expose the MMC counters */ + if (priv->dma_cap.rmon) { + stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc); + + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + char *p; + p = (char *)priv + stmmac_mmc[i].stat_offset; + + data[j++] = (stmmac_mmc[i].sizeof_stat == + sizeof(u64)) ? 
(*(u64 *)p) : + (*(u32 *)p); + } + } + if (priv->eee_enabled) { + int val = phylink_get_eee_err(priv->phylink); + if (val) + priv->xstats.phy_eee_wakeup_error_n = val; + } + + if (priv->synopsys_id >= DWMAC_CORE_3_50) + stmmac_mac_debug(priv, priv->ioaddr, + (void *)&priv->xstats, + rx_queues_count, tx_queues_count); + } + for (i = 0; i < STMMAC_STATS_LEN; i++) { + char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; + data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); + } + stmmac_get_per_qstats(priv, &data[j]); +} + +static int stmmac_get_sset_count(struct net_device *netdev, int sset) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int i, len, safety_len = 0; + + switch (sset) { + case ETH_SS_STATS: + len = STMMAC_STATS_LEN + + STMMAC_TXQ_STATS * tx_cnt + + STMMAC_RXQ_STATS * rx_cnt; + + if (priv->dma_cap.rmon) + len += STMMAC_MMC_STATS_LEN; + if (priv->dma_cap.asp) { + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, NULL)) + safety_len++; + } + + len += safety_len; + } + + return len; + case ETH_SS_TEST: + return stmmac_selftest_get_count(priv); + default: + return -EOPNOTSUPP; + } +} + +static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + + for (q = 0; q < tx_cnt; q++) { + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_tx_string[stat]); + data += ETH_GSTRING_LEN; + } + } + for (q = 0; q < rx_cnt; q++) { + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_rx_string[stat]); + data += ETH_GSTRING_LEN; + } + } +} + +static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + struct stmmac_priv *priv = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_STATS: + if (priv->dma_cap.asp) { + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + const char *desc; + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, &desc)) { + memcpy(p, desc, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + } + if (priv->dma_cap.rmon) + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + memcpy(p, stmmac_mmc[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < STMMAC_STATS_LEN; i++) { + memcpy(p, stmmac_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + stmmac_get_qstats_string(priv, p); + break; + case ETH_SS_TEST: + stmmac_selftest_get_strings(priv, p); + break; + default: + WARN_ON(1); + break; + } +} + +/* Currently only support WOL through Magic packet. 
*/ +static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->plat->pmt) + return phylink_ethtool_get_wol(priv->phylink, wol); + + mutex_lock(&priv->lock); + if (device_can_wakeup(priv->device)) { + wol->supported = WAKE_MAGIC | WAKE_UCAST; + if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame) + wol->supported &= ~WAKE_MAGIC; + wol->wolopts = priv->wolopts; + } + mutex_unlock(&priv->lock); +} + +static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 support = WAKE_MAGIC | WAKE_UCAST; + + if (!device_can_wakeup(priv->device)) + return -EOPNOTSUPP; + + if (!priv->plat->pmt) { + int ret = phylink_ethtool_set_wol(priv->phylink, wol); + + if (!ret) + device_set_wakeup_enable(priv->device, !!wol->wolopts); + return ret; + } + + /* By default almost all GMAC devices support the WoL via + * magic frame but we can disable it if the HW capability + * register shows no support for pmt_magic_frame. */ + if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame)) + wol->wolopts &= ~WAKE_MAGIC; + + if (wol->wolopts & ~support) + return -EINVAL; + + if (wol->wolopts) { + pr_info("stmmac: wakeup enable\n"); + device_set_wakeup_enable(priv->device, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(priv->device, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + mutex_lock(&priv->lock); + priv->wolopts = wol->wolopts; + mutex_unlock(&priv->lock); + + return 0; +} + +static int stmmac_ethtool_op_get_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + edata->eee_enabled = priv->eee_enabled; + edata->eee_active = priv->eee_active; + edata->tx_lpi_timer = priv->tx_lpi_timer; + edata->tx_lpi_enabled = priv->tx_lpi_enabled; + + return phylink_ethtool_get_eee(priv->phylink, edata); +} + +static int stmmac_ethtool_op_set_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + if (priv->tx_lpi_enabled != edata->tx_lpi_enabled) + netdev_warn(priv->dev, + "Setting EEE tx-lpi is not supported\n"); + + if (!edata->eee_enabled) + stmmac_disable_eee_mode(priv); + + ret = phylink_ethtool_set_eee(priv->phylink, edata); + if (ret) + return ret; + + if (edata->eee_enabled && + priv->tx_lpi_timer != edata->tx_lpi_timer) { + priv->tx_lpi_timer = edata->tx_lpi_timer; + stmmac_eee_init(priv); + } + + return 0; +} + +static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) +{ + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); + + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } + + return (usec * (clk / 1000000)) / 256; +} + +static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) +{ + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); + + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } + + return (riwt * 256) / (clk / 1000000); +} + +static int __stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 max_cnt; + u32 rx_cnt; + 
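stmmac_usec2riwt()/stmmac_riwt2usec() above convert between microseconds and receive-interrupt watchdog timer (RIWT) units by scaling with the CSR clock: riwt = usecs * (clk_hz / 1000000) / 256 and the reverse. A quick stand-alone check of that arithmetic (the 250 MHz clock is only an example value):

/* Sketch only: usec <-> RIWT conversion used for rx-usecs coalescing. */
#include <stdio.h>

static unsigned int usec2riwt(unsigned int usec, unsigned long clk_hz)
{
	return (usec * (clk_hz / 1000000)) / 256;
}

static unsigned int riwt2usec(unsigned int riwt, unsigned long clk_hz)
{
	return (riwt * 256) / (clk_hz / 1000000);
}

int main(void)
{
	unsigned long clk = 250000000UL; /* example CSR clock: 250 MHz */
	unsigned int riwt = usec2riwt(100, clk);

	/* Prints "100 us -> riwt 97 -> 99 us"; the round trip shows the rounding loss. */
	printf("100 us -> riwt %u -> %u us\n", riwt, riwt2usec(riwt, clk));
	return 0;
}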
u32 tx_cnt; + + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); + + if (queue < 0) + queue = 0; + else if (queue >= max_cnt) + return -EINVAL; + + if (queue < tx_cnt) { + ec->tx_coalesce_usecs = priv->tx_coal_timer[queue]; + ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue]; + } else { + ec->tx_coalesce_usecs = 0; + ec->tx_max_coalesced_frames = 0; + } + + if (priv->use_riwt && queue < rx_cnt) { + ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue]; + ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue], + priv); + } else { + ec->rx_max_coalesced_frames = 0; + ec->rx_coalesce_usecs = 0; + } + + return 0; +} + +static int stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + return __stmmac_get_coalesce(dev, ec, -1); +} + +static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_get_coalesce(dev, ec, queue); +} + +static int __stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) +{ + struct stmmac_priv *priv = netdev_priv(dev); + bool all_queues = false; + unsigned int rx_riwt; + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; + + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); + + if (queue < 0) + all_queues = true; + else if (queue >= max_cnt) + return -EINVAL; + + if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { + rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); + + if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) + return -EINVAL; + + if (all_queues) { + int i; + + for (i = 0; i < rx_cnt; i++) { + priv->rx_riwt[i] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, i); + priv->rx_coal_frames[i] = + ec->rx_max_coalesced_frames; + } + } else if (queue < rx_cnt) { + priv->rx_riwt[queue] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, queue); + priv->rx_coal_frames[queue] = + ec->rx_max_coalesced_frames; + } + } + + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) || + (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) + return -EINVAL; + + if (all_queues) { + int i; + + for (i = 0; i < tx_cnt; i++) { + priv->tx_coal_frames[i] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[i] = + ec->tx_coalesce_usecs; + } + } else if (queue < tx_cnt) { + priv->tx_coal_frames[queue] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[queue] = + ec->tx_coalesce_usecs; + } + + return 0; +} + +static int stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + return __stmmac_set_coalesce(dev, ec, -1); +} + +static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, queue); +} + +static int stmmac_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->plat->rx_queues_to_use; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 stmmac_get_rxfh_key_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return 
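Both coalesce paths above funnel into a single helper that treats a negative queue index as "apply to every queue", which is how the global .get_coalesce/.set_coalesce hooks and the per-queue hooks share one implementation. A compact sketch of that dispatch convention (queue count and field layout are example values, not the driver's):

/* Sketch only: one helper serves both the global and per-queue ethtool hooks. */
#include <stdio.h>

#define NUM_TX_QUEUES 4

static int set_tx_usecs(int queue, unsigned int usecs, unsigned int *per_queue)
{
	int i;

	if (queue < 0) {           /* global hook: apply to all queues */
		for (i = 0; i < NUM_TX_QUEUES; i++)
			per_queue[i] = usecs;
	} else if (queue < NUM_TX_QUEUES) {
		per_queue[queue] = usecs;
	} else {
		return -1;         /* queue index out of range */
	}
	return 0;
}

int main(void)
{
	unsigned int tx_usecs[NUM_TX_QUEUES] = { 0 };

	set_tx_usecs(-1, 64, tx_usecs);  /* like .set_coalesce */
	set_tx_usecs(2, 16, tx_usecs);   /* like .set_per_queue_coalesce */
	printf("%u %u %u %u\n", tx_usecs[0], tx_usecs[1], tx_usecs[2], tx_usecs[3]);
	return 0;
}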
sizeof(priv->rss.key); +} + +static u32 stmmac_get_rxfh_indir_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return ARRAY_SIZE(priv->rss.table); +} + +static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + indir[i] = priv->rss.table[i]; + } + + if (key) + memcpy(key, priv->rss.key, sizeof(priv->rss.key)); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = indir[i]; + } + + if (key) + memcpy(priv->rss.key, key, sizeof(priv->rss.key)); + + return stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); +} + +static void stmmac_get_channels(struct net_device *dev, + struct ethtool_channels *chan) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + chan->rx_count = priv->plat->rx_queues_to_use; + chan->tx_count = priv->plat->tx_queues_to_use; + chan->max_rx = priv->dma_cap.number_rx_queues; + chan->max_tx = priv->dma_cap.number_tx_queues; +} + +static int stmmac_set_channels(struct net_device *dev, + struct ethtool_channels *chan) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (chan->rx_count > priv->dma_cap.number_rx_queues || + chan->tx_count > priv->dma_cap.number_tx_queues || + !chan->rx_count || !chan->tx_count) + return -EINVAL; + + return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count); +} + +static int stmmac_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (priv->ptp_clock) + info->phc_index = ptp_clock_index(priv->ptp_clock); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + return 0; + } else + return ethtool_op_get_ts_info(dev, info); +} + +static int stmmac_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = priv->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int stmmac_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + priv->rx_copybreak = *(u32 *)data; + break; + default: + 
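The RSS hooks above expose the hash key and the indirection table to ethtool; conceptually, the hardware selects the RX queue by indexing that table with the low bits of the packet hash, so the table contents decide how flows spread across queues. A small generic sketch of that scheme (table size and queue count are example values, and the exact indexing is the generic RSS convention, not a claim about this MAC):

/* Sketch only: generic RSS indirection - hash low bits index a queue table. */
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 128u   /* example indirection table size */
#define NUM_QUEUES 4u     /* example RX queue count */

int main(void)
{
	uint32_t table[TABLE_SIZE];
	uint32_t i;

	/* Default fill: spread entries evenly over the RX queues,
	 * which is what a round-robin indirection table looks like. */
	for (i = 0; i < TABLE_SIZE; i++)
		table[i] = i % NUM_QUEUES;

	/* A flow whose hash is 0x9e3779b9 lands in: */
	printf("queue %u\n", table[0x9e3779b9u % TABLE_SIZE]);
	return 0;
}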
ret = -EINVAL; + break; + } + + return ret; +} + +static const struct ethtool_ops stmmac_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .begin = stmmac_check_if_running, + .get_drvinfo = stmmac_ethtool_getdrvinfo, + .get_msglevel = stmmac_ethtool_getmsglevel, + .set_msglevel = stmmac_ethtool_setmsglevel, + .get_regs = stmmac_ethtool_gregs, + .get_regs_len = stmmac_ethtool_get_regs_len, + .get_link = ethtool_op_get_link, + .nway_reset = stmmac_nway_reset, + .get_ringparam = stmmac_get_ringparam, + .set_ringparam = stmmac_set_ringparam, + .get_pauseparam = stmmac_get_pauseparam, + .set_pauseparam = stmmac_set_pauseparam, + .self_test = stmmac_selftest_run, + .get_ethtool_stats = stmmac_get_ethtool_stats, + .get_strings = stmmac_get_strings, + .get_wol = stmmac_get_wol, + .set_wol = stmmac_set_wol, + .get_eee = stmmac_ethtool_op_get_eee, + .set_eee = stmmac_ethtool_op_set_eee, + .get_sset_count = stmmac_get_sset_count, + .get_rxnfc = stmmac_get_rxnfc, + .get_rxfh_key_size = stmmac_get_rxfh_key_size, + .get_rxfh_indir_size = stmmac_get_rxfh_indir_size, + .get_rxfh = stmmac_get_rxfh, + .set_rxfh = stmmac_set_rxfh, + .get_ts_info = stmmac_get_ts_info, + .get_coalesce = stmmac_get_coalesce, + .set_coalesce = stmmac_set_coalesce, + .get_per_queue_coalesce = stmmac_get_per_queue_coalesce, + .set_per_queue_coalesce = stmmac_set_per_queue_coalesce, + .get_channels = stmmac_get_channels, + .set_channels = stmmac_set_channels, + .get_tunable = stmmac_get_tunable, + .set_tunable = stmmac_set_tunable, + .get_link_ksettings = stmmac_ethtool_get_link_ksettings, + .set_link_ksettings = stmmac_ethtool_set_link_ksettings, +}; + +void stmmac_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &stmmac_ethtool_ops; +} diff --git a/devices/stmmac/stmmac_ethtool-6.1-orig.c b/devices/stmmac/stmmac_ethtool-6.1-orig.c new file mode 100644 index 00000000..f03aa8a0 --- /dev/null +++ b/devices/stmmac/stmmac_ethtool-6.1-orig.c @@ -0,0 +1,1202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + STMMAC Ethtool support + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "dwmac_dma.h" +#include "dwxgmac2.h" + +#define REG_SPACE_SIZE 0x1060 +#define GMAC4_REG_SPACE_SIZE 0x116C +#define MAC100_ETHTOOL_NAME "st_mac100" +#define GMAC_ETHTOOL_NAME "st_gmac" +#define XGMAC_ETHTOOL_NAME "st_xgmac" + +/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h + * + * It is here because dwmac_dma.h and dwmac4_dam.h can not be included at the + * same time due to the conflicting macro names. 
+ */ +#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100 + +#define ETHTOOL_DMA_OFFSET 55 + +struct stmmac_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define STMMAC_STAT(m) \ + { #m, sizeof_field(struct stmmac_extra_stats, m), \ + offsetof(struct stmmac_priv, xstats.m)} + +static const struct stmmac_stats stmmac_gstrings_stats[] = { + /* Transmit errors */ + STMMAC_STAT(tx_underflow), + STMMAC_STAT(tx_carrier), + STMMAC_STAT(tx_losscarrier), + STMMAC_STAT(vlan_tag), + STMMAC_STAT(tx_deferred), + STMMAC_STAT(tx_vlan), + STMMAC_STAT(tx_jabber), + STMMAC_STAT(tx_frame_flushed), + STMMAC_STAT(tx_payload_error), + STMMAC_STAT(tx_ip_header_error), + /* Receive errors */ + STMMAC_STAT(rx_desc), + STMMAC_STAT(sa_filter_fail), + STMMAC_STAT(overflow_error), + STMMAC_STAT(ipc_csum_error), + STMMAC_STAT(rx_collision), + STMMAC_STAT(rx_crc_errors), + STMMAC_STAT(dribbling_bit), + STMMAC_STAT(rx_length), + STMMAC_STAT(rx_mii), + STMMAC_STAT(rx_multicast), + STMMAC_STAT(rx_gmac_overflow), + STMMAC_STAT(rx_watchdog), + STMMAC_STAT(da_rx_filter_fail), + STMMAC_STAT(sa_rx_filter_fail), + STMMAC_STAT(rx_missed_cntr), + STMMAC_STAT(rx_overflow_cntr), + STMMAC_STAT(rx_vlan), + STMMAC_STAT(rx_split_hdr_pkt_n), + /* Tx/Rx IRQ error info */ + STMMAC_STAT(tx_undeflow_irq), + STMMAC_STAT(tx_process_stopped_irq), + STMMAC_STAT(tx_jabber_irq), + STMMAC_STAT(rx_overflow_irq), + STMMAC_STAT(rx_buf_unav_irq), + STMMAC_STAT(rx_process_stopped_irq), + STMMAC_STAT(rx_watchdog_irq), + STMMAC_STAT(tx_early_irq), + STMMAC_STAT(fatal_bus_error_irq), + /* Tx/Rx IRQ Events */ + STMMAC_STAT(rx_early_irq), + STMMAC_STAT(threshold), + STMMAC_STAT(tx_pkt_n), + STMMAC_STAT(rx_pkt_n), + STMMAC_STAT(normal_irq_n), + STMMAC_STAT(rx_normal_irq_n), + STMMAC_STAT(napi_poll), + STMMAC_STAT(tx_normal_irq_n), + STMMAC_STAT(tx_clean), + STMMAC_STAT(tx_set_ic_bit), + STMMAC_STAT(irq_receive_pmt_irq_n), + /* MMC info */ + STMMAC_STAT(mmc_tx_irq_n), + STMMAC_STAT(mmc_rx_irq_n), + STMMAC_STAT(mmc_rx_csum_offload_irq_n), + /* EEE */ + STMMAC_STAT(irq_tx_path_in_lpi_mode_n), + STMMAC_STAT(irq_tx_path_exit_lpi_mode_n), + STMMAC_STAT(irq_rx_path_in_lpi_mode_n), + STMMAC_STAT(irq_rx_path_exit_lpi_mode_n), + STMMAC_STAT(phy_eee_wakeup_error_n), + /* Extended RDES status */ + STMMAC_STAT(ip_hdr_err), + STMMAC_STAT(ip_payload_err), + STMMAC_STAT(ip_csum_bypassed), + STMMAC_STAT(ipv4_pkt_rcvd), + STMMAC_STAT(ipv6_pkt_rcvd), + STMMAC_STAT(no_ptp_rx_msg_type_ext), + STMMAC_STAT(ptp_rx_msg_type_sync), + STMMAC_STAT(ptp_rx_msg_type_follow_up), + STMMAC_STAT(ptp_rx_msg_type_delay_req), + STMMAC_STAT(ptp_rx_msg_type_delay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_req), + STMMAC_STAT(ptp_rx_msg_type_pdelay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up), + STMMAC_STAT(ptp_rx_msg_type_announce), + STMMAC_STAT(ptp_rx_msg_type_management), + STMMAC_STAT(ptp_rx_msg_pkt_reserved_type), + STMMAC_STAT(ptp_frame_type), + STMMAC_STAT(ptp_ver), + STMMAC_STAT(timestamp_dropped), + STMMAC_STAT(av_pkt_rcvd), + STMMAC_STAT(av_tagged_pkt_rcvd), + STMMAC_STAT(vlan_tag_priority_val), + STMMAC_STAT(l3_filter_match), + STMMAC_STAT(l4_filter_match), + STMMAC_STAT(l3_l4_filter_no_match), + /* PCS */ + STMMAC_STAT(irq_pcs_ane_n), + STMMAC_STAT(irq_pcs_link_n), + STMMAC_STAT(irq_rgmii_n), + /* DEBUG */ + STMMAC_STAT(mtl_tx_status_fifo_full), + STMMAC_STAT(mtl_tx_fifo_not_empty), + STMMAC_STAT(mmtl_fifo_ctrl), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_write), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait), + STMMAC_STAT(mtl_tx_fifo_read_ctrl_read), 
+ STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle), + STMMAC_STAT(mac_tx_in_pause), + STMMAC_STAT(mac_tx_frame_ctrl_xfer), + STMMAC_STAT(mac_tx_frame_ctrl_idle), + STMMAC_STAT(mac_tx_frame_ctrl_wait), + STMMAC_STAT(mac_tx_frame_ctrl_pause), + STMMAC_STAT(mac_gmii_tx_proto_engine), + STMMAC_STAT(mtl_rx_fifo_fill_level_full), + STMMAC_STAT(mtl_rx_fifo_fill_above_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_below_thresh), + STMMAC_STAT(mtl_rx_fifo_fill_level_empty), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_status), + STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle), + STMMAC_STAT(mtl_rx_fifo_ctrl_active), + STMMAC_STAT(mac_rx_frame_ctrl_fifo), + STMMAC_STAT(mac_gmii_rx_proto_engine), + /* TSO */ + STMMAC_STAT(tx_tso_frames), + STMMAC_STAT(tx_tso_nfrags), + /* EST */ + STMMAC_STAT(mtl_est_cgce), + STMMAC_STAT(mtl_est_hlbs), + STMMAC_STAT(mtl_est_hlbf), + STMMAC_STAT(mtl_est_btre), + STMMAC_STAT(mtl_est_btrlm), +}; +#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) + +/* HW MAC Management counters (if supported) */ +#define STMMAC_MMC_STAT(m) \ + { #m, sizeof_field(struct stmmac_counters, m), \ + offsetof(struct stmmac_priv, mmc.m)} + +static const struct stmmac_stats stmmac_mmc[] = { + STMMAC_MMC_STAT(mmc_tx_octetcount_gb), + STMMAC_MMC_STAT(mmc_tx_framecount_gb), + STMMAC_MMC_STAT(mmc_tx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_tx_multicastframe_g), + STMMAC_MMC_STAT(mmc_tx_64_octets_gb), + STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_tx_unicast_gb), + STMMAC_MMC_STAT(mmc_tx_multicast_gb), + STMMAC_MMC_STAT(mmc_tx_broadcast_gb), + STMMAC_MMC_STAT(mmc_tx_underflow_error), + STMMAC_MMC_STAT(mmc_tx_singlecol_g), + STMMAC_MMC_STAT(mmc_tx_multicol_g), + STMMAC_MMC_STAT(mmc_tx_deferred), + STMMAC_MMC_STAT(mmc_tx_latecol), + STMMAC_MMC_STAT(mmc_tx_exesscol), + STMMAC_MMC_STAT(mmc_tx_carrier_error), + STMMAC_MMC_STAT(mmc_tx_octetcount_g), + STMMAC_MMC_STAT(mmc_tx_framecount_g), + STMMAC_MMC_STAT(mmc_tx_excessdef), + STMMAC_MMC_STAT(mmc_tx_pause_frame), + STMMAC_MMC_STAT(mmc_tx_vlan_frame_g), + STMMAC_MMC_STAT(mmc_rx_framecount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_g), + STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_rx_multicastframe_g), + STMMAC_MMC_STAT(mmc_rx_crc_error), + STMMAC_MMC_STAT(mmc_rx_align_error), + STMMAC_MMC_STAT(mmc_rx_run_error), + STMMAC_MMC_STAT(mmc_rx_jabber_error), + STMMAC_MMC_STAT(mmc_rx_undersize_g), + STMMAC_MMC_STAT(mmc_rx_oversize_g), + STMMAC_MMC_STAT(mmc_rx_64_octets_gb), + STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_rx_unicast_g), + STMMAC_MMC_STAT(mmc_rx_length_error), + STMMAC_MMC_STAT(mmc_rx_autofrangetype), + STMMAC_MMC_STAT(mmc_rx_pause_frames), + STMMAC_MMC_STAT(mmc_rx_fifo_overflow), + STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb), + STMMAC_MMC_STAT(mmc_rx_watchdog_error), + STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask), + STMMAC_MMC_STAT(mmc_rx_ipc_intr), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag), + 
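The counter tables only supply values; ethtool also expects a parallel name table made of fixed ETH_GSTRING_LEN-byte slots, and get_sset_count(), get_strings() and get_ethtool_stats() must agree on count and order. A minimal sketch of filling such fixed-width name slots, including the per-queue "q%d_%s" naming scheme used by the queue statistics further down; the queue count and buffer size here are illustrative assumptions.

    /* Illustrative sketch of fixed-width ethtool string slots; not driver code. */
    #include <stdio.h>
    #include <string.h>

    #define ETH_GSTRING_LEN 32

    static const char txq_stats[][ETH_GSTRING_LEN] = { "tx_pkt_n", "tx_irq_n" };

    int main(void)
    {
        char data[4 * ETH_GSTRING_LEN];   /* 2 queues x 2 names, one slot each */
        char *p = data;
        int q, s;

        for (q = 0; q < 2; q++) {
            for (s = 0; s < 2; s++) {
                snprintf(p, ETH_GSTRING_LEN, "q%d_%s", q, txq_stats[s]);
                p += ETH_GSTRING_LEN;     /* advance one fixed-width slot */
            }
        }
        for (s = 0; s < 4; s++)
            printf("%s\n", data + s * ETH_GSTRING_LEN);
        return 0;
    }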
STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr), + STMMAC_MMC_STAT(mmc_rx_ipv6_nopay), + STMMAC_MMC_STAT(mmc_rx_udp_gd), + STMMAC_MMC_STAT(mmc_rx_udp_err), + STMMAC_MMC_STAT(mmc_rx_tcp_gd), + STMMAC_MMC_STAT(mmc_rx_tcp_err), + STMMAC_MMC_STAT(mmc_rx_icmp_gd), + STMMAC_MMC_STAT(mmc_rx_icmp_err), + STMMAC_MMC_STAT(mmc_rx_udp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_udp_err_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_err_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_err_octets), + STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr), + STMMAC_MMC_STAT(mmc_tx_hold_req_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr), + STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr), +}; +#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc) + +static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = { + "tx_pkt_n", + "tx_irq_n", +#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string) +}; + +static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = { + "rx_pkt_n", + "rx_irq_n", +#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string) +}; + +static void stmmac_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->has_gmac || priv->plat->has_gmac4) + strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + else if (priv->plat->has_xgmac) + strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver)); + else + strscpy(info->driver, MAC100_ETHTOOL_NAME, + sizeof(info->driver)); + + if (priv->plat->pdev) { + strscpy(info->bus_info, pci_name(priv->plat->pdev), + sizeof(info->bus_info)); + } +} + +static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { + struct rgmii_adv adv; + u32 supported, advertising, lp_advertising; + + if (!priv->xstats.pcs_link) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + return 0; + } + cmd->base.duplex = priv->xstats.pcs_duplex; + + cmd->base.speed = priv->xstats.pcs_speed; + + /* Get and convert ADV/LP_ADV from the HW AN registers */ + if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv)) + return -EOPNOTSUPP; /* should never happen indeed */ + + /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ + + ethtool_convert_link_mode_to_legacy_u32( + &supported, cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32( + &advertising, cmd->link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32( + &lp_advertising, cmd->link_modes.lp_advertising); + + if (adv.pause & STMMAC_PCS_PAUSE) + advertising |= ADVERTISED_Pause; + if (adv.pause & STMMAC_PCS_ASYM_PAUSE) + advertising |= ADVERTISED_Asym_Pause; + if (adv.lp_pause & STMMAC_PCS_PAUSE) + lp_advertising |= ADVERTISED_Pause; + if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE) + lp_advertising |= ADVERTISED_Asym_Pause; + + /* Reg49[3] 
always set because ANE is always supported */ + cmd->base.autoneg = ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; + lp_advertising |= ADVERTISED_Autoneg; + + if (adv.duplex) { + supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Full); + advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + } else { + supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + } + if (adv.lp_duplex) + lp_advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + else + lp_advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + cmd->base.port = PORT_OTHER; + + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.supported, supported); + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.advertising, advertising); + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.lp_advertising, lp_advertising); + + return 0; + } + + return phylink_ethtool_ksettings_get(priv->phylink, cmd); +} + +static int +stmmac_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { + u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; + + /* Only support ANE */ + if (cmd->base.autoneg != AUTONEG_ENABLE) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + mutex_lock(&priv->lock); + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); + mutex_unlock(&priv->lock); + + return 0; + } + + return phylink_ethtool_ksettings_set(priv->phylink, cmd); +} + +static u32 stmmac_ethtool_getmsglevel(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct stmmac_priv *priv = netdev_priv(dev); + priv->msg_enable = level; + +} + +static int stmmac_check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EBUSY; + return 0; +} + +static int stmmac_ethtool_get_regs_len(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->has_xgmac) + return XGMAC_REGSIZE * 4; + else if (priv->plat->has_gmac4) + return GMAC4_REG_SPACE_SIZE; + return REG_SPACE_SIZE; +} + +static void stmmac_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 *reg_space = (u32 *) space; + + stmmac_dump_mac_regs(priv, priv->hw, reg_space); + stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); + + /* Copy DMA registers to where ethtool expects them */ + if (priv->plat->has_gmac4) { + /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], + NUM_DWMAC4_DMA_REGS * 4); + } else if (!priv->plat->has_xgmac) { + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[DMA_BUS_MODE / 4], + NUM_DWMAC1000_DMA_REGS * 4); + } +} + +static int stmmac_nway_reset(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return 
phylink_ethtool_nway_reset(priv->phylink); +} + +static void stmmac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + ring->rx_max_pending = DMA_MAX_RX_SIZE; + ring->tx_max_pending = DMA_MAX_TX_SIZE; + ring->rx_pending = priv->dma_conf.dma_rx_size; + ring->tx_pending = priv->dma_conf.dma_tx_size; +} + +static int stmmac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + if (ring->rx_mini_pending || ring->rx_jumbo_pending || + ring->rx_pending < DMA_MIN_RX_SIZE || + ring->rx_pending > DMA_MAX_RX_SIZE || + !is_power_of_2(ring->rx_pending) || + ring->tx_pending < DMA_MIN_TX_SIZE || + ring->tx_pending > DMA_MAX_TX_SIZE || + !is_power_of_2(ring->tx_pending)) + return -EINVAL; + + return stmmac_reinit_ringparam(netdev, ring->rx_pending, + ring->tx_pending); +} + +static void +stmmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + struct rgmii_adv adv_lp; + + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { + pause->autoneg = 1; + if (!adv_lp.pause) + return; + } else { + phylink_ethtool_get_pauseparam(priv->phylink, pause); + } +} + +static int +stmmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + struct rgmii_adv adv_lp; + + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { + pause->autoneg = 1; + if (!adv_lp.pause) + return -EOPNOTSUPP; + return 0; + } else { + return phylink_ethtool_set_pauseparam(priv->phylink, pause); + } +} + +static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + char *p; + + for (q = 0; q < tx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.txq_stats[q].tx_pkt_n); + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + *data++ = (*(unsigned long *)p); + p += sizeof(unsigned long); + } + } + for (q = 0; q < rx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.rxq_stats[q].rx_pkt_n); + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + *data++ = (*(unsigned long *)p); + p += sizeof(unsigned long); + } + } +} + +static void stmmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, u64 *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 tx_queues_count = priv->plat->tx_queues_to_use; + unsigned long count; + int i, j = 0, ret; + + if (priv->dma_cap.asp) { + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + if (!stmmac_safety_feat_dump(priv, &priv->sstats, i, + &count, NULL)) + data[j++] = count; + } + } + + /* Update the DMA HW counters for dwmac10/100 */ + ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, + priv->ioaddr); + if (ret) { + /* If supported, for new GMAC chips expose the MMC counters */ + if (priv->dma_cap.rmon) { + stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc); + + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + char *p; + p = (char *)priv + stmmac_mmc[i].stat_offset; + + data[j++] = (stmmac_mmc[i].sizeof_stat == + sizeof(u64)) ? 
(*(u64 *)p) : + (*(u32 *)p); + } + } + if (priv->eee_enabled) { + int val = phylink_get_eee_err(priv->phylink); + if (val) + priv->xstats.phy_eee_wakeup_error_n = val; + } + + if (priv->synopsys_id >= DWMAC_CORE_3_50) + stmmac_mac_debug(priv, priv->ioaddr, + (void *)&priv->xstats, + rx_queues_count, tx_queues_count); + } + for (i = 0; i < STMMAC_STATS_LEN; i++) { + char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; + data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); + } + stmmac_get_per_qstats(priv, &data[j]); +} + +static int stmmac_get_sset_count(struct net_device *netdev, int sset) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int i, len, safety_len = 0; + + switch (sset) { + case ETH_SS_STATS: + len = STMMAC_STATS_LEN + + STMMAC_TXQ_STATS * tx_cnt + + STMMAC_RXQ_STATS * rx_cnt; + + if (priv->dma_cap.rmon) + len += STMMAC_MMC_STATS_LEN; + if (priv->dma_cap.asp) { + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, NULL)) + safety_len++; + } + + len += safety_len; + } + + return len; + case ETH_SS_TEST: + return stmmac_selftest_get_count(priv); + default: + return -EOPNOTSUPP; + } +} + +static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + + for (q = 0; q < tx_cnt; q++) { + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_tx_string[stat]); + data += ETH_GSTRING_LEN; + } + } + for (q = 0; q < rx_cnt; q++) { + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_rx_string[stat]); + data += ETH_GSTRING_LEN; + } + } +} + +static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + struct stmmac_priv *priv = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_STATS: + if (priv->dma_cap.asp) { + for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { + const char *desc; + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, &desc)) { + memcpy(p, desc, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + } + if (priv->dma_cap.rmon) + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + memcpy(p, stmmac_mmc[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < STMMAC_STATS_LEN; i++) { + memcpy(p, stmmac_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + stmmac_get_qstats_string(priv, p); + break; + case ETH_SS_TEST: + stmmac_selftest_get_strings(priv, p); + break; + default: + WARN_ON(1); + break; + } +} + +/* Currently only support WOL through Magic packet. 
*/ +static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->plat->pmt) + return phylink_ethtool_get_wol(priv->phylink, wol); + + mutex_lock(&priv->lock); + if (device_can_wakeup(priv->device)) { + wol->supported = WAKE_MAGIC | WAKE_UCAST; + if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame) + wol->supported &= ~WAKE_MAGIC; + wol->wolopts = priv->wolopts; + } + mutex_unlock(&priv->lock); +} + +static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 support = WAKE_MAGIC | WAKE_UCAST; + + if (!device_can_wakeup(priv->device)) + return -EOPNOTSUPP; + + if (!priv->plat->pmt) { + int ret = phylink_ethtool_set_wol(priv->phylink, wol); + + if (!ret) + device_set_wakeup_enable(priv->device, !!wol->wolopts); + return ret; + } + + /* By default almost all GMAC devices support the WoL via + * magic frame but we can disable it if the HW capability + * register shows no support for pmt_magic_frame. */ + if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame)) + wol->wolopts &= ~WAKE_MAGIC; + + if (wol->wolopts & ~support) + return -EINVAL; + + if (wol->wolopts) { + pr_info("stmmac: wakeup enable\n"); + device_set_wakeup_enable(priv->device, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(priv->device, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + mutex_lock(&priv->lock); + priv->wolopts = wol->wolopts; + mutex_unlock(&priv->lock); + + return 0; +} + +static int stmmac_ethtool_op_get_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + edata->eee_enabled = priv->eee_enabled; + edata->eee_active = priv->eee_active; + edata->tx_lpi_timer = priv->tx_lpi_timer; + edata->tx_lpi_enabled = priv->tx_lpi_enabled; + + return phylink_ethtool_get_eee(priv->phylink, edata); +} + +static int stmmac_ethtool_op_set_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + if (priv->tx_lpi_enabled != edata->tx_lpi_enabled) + netdev_warn(priv->dev, + "Setting EEE tx-lpi is not supported\n"); + + if (!edata->eee_enabled) + stmmac_disable_eee_mode(priv); + + ret = phylink_ethtool_set_eee(priv->phylink, edata); + if (ret) + return ret; + + if (edata->eee_enabled && + priv->tx_lpi_timer != edata->tx_lpi_timer) { + priv->tx_lpi_timer = edata->tx_lpi_timer; + stmmac_eee_init(priv); + } + + return 0; +} + +static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) +{ + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); + + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } + + return (usec * (clk / 1000000)) / 256; +} + +static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) +{ + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); + + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } + + return (riwt * 256) / (clk / 1000000); +} + +static int __stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 max_cnt; + u32 rx_cnt; + 
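stmmac_usec2riwt()/stmmac_riwt2usec() above convert between microseconds and the RX interrupt watchdog timer (RIWT) register, which counts in units of 256 CSR clock cycles. A self-contained sketch of the same arithmetic, assuming an example 250 MHz CSR clock; in the driver the real rate comes from clk_get_rate() or plat->clk_ref_rate.

    /* Illustrative usec <-> RIWT conversion; clock rate is an assumption. */
    #include <stdio.h>

    static unsigned int usec2riwt(unsigned int usec, unsigned long csr_clk_hz)
    {
        return (usec * (csr_clk_hz / 1000000UL)) / 256;
    }

    static unsigned int riwt2usec(unsigned int riwt, unsigned long csr_clk_hz)
    {
        return (riwt * 256) / (csr_clk_hz / 1000000UL);
    }

    int main(void)
    {
        unsigned long clk = 250000000UL;  /* example: 250 MHz CSR clock */
        unsigned int riwt = usec2riwt(100, clk);

        /* 100 us at 250 MHz -> 25000 cycles -> RIWT 97 with integer math */
        printf("riwt=%u, back to ~%u us\n", riwt, riwt2usec(riwt, clk));
        return 0;
    }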
u32 tx_cnt; + + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); + + if (queue < 0) + queue = 0; + else if (queue >= max_cnt) + return -EINVAL; + + if (queue < tx_cnt) { + ec->tx_coalesce_usecs = priv->tx_coal_timer[queue]; + ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue]; + } else { + ec->tx_coalesce_usecs = 0; + ec->tx_max_coalesced_frames = 0; + } + + if (priv->use_riwt && queue < rx_cnt) { + ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue]; + ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue], + priv); + } else { + ec->rx_max_coalesced_frames = 0; + ec->rx_coalesce_usecs = 0; + } + + return 0; +} + +static int stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + return __stmmac_get_coalesce(dev, ec, -1); +} + +static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_get_coalesce(dev, ec, queue); +} + +static int __stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) +{ + struct stmmac_priv *priv = netdev_priv(dev); + bool all_queues = false; + unsigned int rx_riwt; + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; + + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); + + if (queue < 0) + all_queues = true; + else if (queue >= max_cnt) + return -EINVAL; + + if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { + rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); + + if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) + return -EINVAL; + + if (all_queues) { + int i; + + for (i = 0; i < rx_cnt; i++) { + priv->rx_riwt[i] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, i); + priv->rx_coal_frames[i] = + ec->rx_max_coalesced_frames; + } + } else if (queue < rx_cnt) { + priv->rx_riwt[queue] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, queue); + priv->rx_coal_frames[queue] = + ec->rx_max_coalesced_frames; + } + } + + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) || + (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) + return -EINVAL; + + if (all_queues) { + int i; + + for (i = 0; i < tx_cnt; i++) { + priv->tx_coal_frames[i] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[i] = + ec->tx_coalesce_usecs; + } + } else if (queue < tx_cnt) { + priv->tx_coal_frames[queue] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[queue] = + ec->tx_coalesce_usecs; + } + + return 0; +} + +static int stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + return __stmmac_set_coalesce(dev, ec, -1); +} + +static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, queue); +} + +static int stmmac_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->plat->rx_queues_to_use; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 stmmac_get_rxfh_key_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return 
sizeof(priv->rss.key); +} + +static u32 stmmac_get_rxfh_indir_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return ARRAY_SIZE(priv->rss.table); +} + +static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + indir[i] = priv->rss.table[i]; + } + + if (key) + memcpy(key, priv->rss.key, sizeof(priv->rss.key)); + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = indir[i]; + } + + if (key) + memcpy(priv->rss.key, key, sizeof(priv->rss.key)); + + return stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); +} + +static void stmmac_get_channels(struct net_device *dev, + struct ethtool_channels *chan) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + chan->rx_count = priv->plat->rx_queues_to_use; + chan->tx_count = priv->plat->tx_queues_to_use; + chan->max_rx = priv->dma_cap.number_rx_queues; + chan->max_tx = priv->dma_cap.number_tx_queues; +} + +static int stmmac_set_channels(struct net_device *dev, + struct ethtool_channels *chan) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (chan->rx_count > priv->dma_cap.number_rx_queues || + chan->tx_count > priv->dma_cap.number_tx_queues || + !chan->rx_count || !chan->tx_count) + return -EINVAL; + + return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count); +} + +static int stmmac_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (priv->ptp_clock) + info->phc_index = ptp_clock_index(priv->ptp_clock); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + return 0; + } else + return ethtool_op_get_ts_info(dev, info); +} + +static int stmmac_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = priv->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int stmmac_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + priv->rx_copybreak = *(u32 *)data; + break; + default: + 
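get_rxfh()/set_rxfh() above expose the RSS hash key and indirection table; conceptually, the low bits of the per-flow hash index the table and the entry selects the RX queue. A small sketch of that lookup with illustrative table and queue sizes; in the driver the real sizes are reported by get_rxfh_indir_size() and the platform queue count.

    /* Illustrative RSS indirection lookup; sizes and hash are assumptions. */
    #include <stdio.h>
    #include <stdint.h>

    #define INDIR_SIZE    128
    #define NUM_RX_QUEUES 4

    int main(void)
    {
        uint8_t indir[INDIR_SIZE];
        uint32_t hash = 0x9e3779b9;       /* example flow hash value */
        int i;

        /* default spread: fill the table round-robin over the queues */
        for (i = 0; i < INDIR_SIZE; i++)
            indir[i] = i % NUM_RX_QUEUES;

        printf("hash 0x%08x -> queue %u\n", hash, indir[hash % INDIR_SIZE]);
        return 0;
    }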
ret = -EINVAL; + break; + } + + return ret; +} + +static const struct ethtool_ops stmmac_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .begin = stmmac_check_if_running, + .get_drvinfo = stmmac_ethtool_getdrvinfo, + .get_msglevel = stmmac_ethtool_getmsglevel, + .set_msglevel = stmmac_ethtool_setmsglevel, + .get_regs = stmmac_ethtool_gregs, + .get_regs_len = stmmac_ethtool_get_regs_len, + .get_link = ethtool_op_get_link, + .nway_reset = stmmac_nway_reset, + .get_ringparam = stmmac_get_ringparam, + .set_ringparam = stmmac_set_ringparam, + .get_pauseparam = stmmac_get_pauseparam, + .set_pauseparam = stmmac_set_pauseparam, + .self_test = stmmac_selftest_run, + .get_ethtool_stats = stmmac_get_ethtool_stats, + .get_strings = stmmac_get_strings, + .get_wol = stmmac_get_wol, + .set_wol = stmmac_set_wol, + .get_eee = stmmac_ethtool_op_get_eee, + .set_eee = stmmac_ethtool_op_set_eee, + .get_sset_count = stmmac_get_sset_count, + .get_rxnfc = stmmac_get_rxnfc, + .get_rxfh_key_size = stmmac_get_rxfh_key_size, + .get_rxfh_indir_size = stmmac_get_rxfh_indir_size, + .get_rxfh = stmmac_get_rxfh, + .set_rxfh = stmmac_set_rxfh, + .get_ts_info = stmmac_get_ts_info, + .get_coalesce = stmmac_get_coalesce, + .set_coalesce = stmmac_set_coalesce, + .get_per_queue_coalesce = stmmac_get_per_queue_coalesce, + .set_per_queue_coalesce = stmmac_set_per_queue_coalesce, + .get_channels = stmmac_get_channels, + .set_channels = stmmac_set_channels, + .get_tunable = stmmac_get_tunable, + .set_tunable = stmmac_set_tunable, + .get_link_ksettings = stmmac_ethtool_get_link_ksettings, + .set_link_ksettings = stmmac_ethtool_set_link_ksettings, +}; + +void stmmac_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &stmmac_ethtool_ops; +} diff --git a/devices/stmmac/stmmac_hwtstamp-6.1-ethercat.c b/devices/stmmac/stmmac_hwtstamp-6.1-ethercat.c new file mode 100644 index 00000000..7899ec7c --- /dev/null +++ b/devices/stmmac/stmmac_hwtstamp-6.1-ethercat.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This implements all the API for managing HW timestamp & PTP. + + + Author: Rayagond Kokatanur + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include "common-6.1-ethercat.h" +#include "stmmac_ptp-6.1-ethercat.h" +#include "dwmac4-6.1-ethercat.h" +#include "stmmac-6.1-ethercat.h" + +static void config_hw_tstamping(void __iomem *ioaddr, u32 data) +{ + writel(data, ioaddr + PTP_TCR); +} + +static void config_sub_second_increment(void __iomem *ioaddr, + u32 ptp_clock, int gmac4, u32 *ssinc) +{ + u32 value = readl(ioaddr + PTP_TCR); + unsigned long data; + u32 reg_value; + + /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second + * increment to twice the number of nanoseconds of a clock cycle. + * The calculation of the default_addend value by the caller will set it + * to mid-range = 2^31 when the remainder of this division is zero, + * which will make the accumulator overflow once every 2 ptp_clock + * cycles, adding twice the number of nanoseconds of a clock cycle : + * 2000000000ULL / ptp_clock. 
+ */ + if (value & PTP_TCR_TSCFUPDT) + data = (2000000000ULL / ptp_clock); + else + data = (1000000000ULL / ptp_clock); + + /* 0.465ns accuracy */ + if (!(value & PTP_TCR_TSCTRLSSR)) + data = (data * 1000) / 465; + + if (data > PTP_SSIR_SSINC_MAX) + data = PTP_SSIR_SSINC_MAX; + + reg_value = data; + if (gmac4) + reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT; + + writel(reg_value, ioaddr + PTP_SSIR); + + if (ssinc) + *ssinc = data; +} + +static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +{ + u32 value; + + writel(sec, ioaddr + PTP_STSUR); + writel(nsec, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSINIT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time initialize to complete */ + return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value, + !(value & PTP_TCR_TSINIT), + 10, 100000); +} + +static int config_addend(void __iomem *ioaddr, u32 addend) +{ + u32 value; + int limit; + + writel(addend, ioaddr + PTP_TAR); + /* issue command to update the addend value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSADDREG; + writel(value, ioaddr + PTP_TCR); + + /* wait for present addend update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4) +{ + u32 value; + int limit; + + if (add_sub) { + /* If the new sec value needs to be subtracted with + * the system time, then MAC_STSUR reg should be + * programmed with (2^32 – ) + */ + if (gmac4) + sec = -sec; + + value = readl(ioaddr + PTP_TCR); + if (value & PTP_TCR_TSCTRLSSR) + nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec); + else + nsec = (PTP_BINARY_ROLLOVER_MODE - nsec); + } + + writel(sec, ioaddr + PTP_STSUR); + value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec; + writel(value, ioaddr + PTP_STNSUR); + + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSUPDT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time adjust/update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static void get_systime(void __iomem *ioaddr, u64 *systime) +{ + u64 ns, sec0, sec1; + + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + do { + sec0 = sec1; + /* Get the TSSS value */ + ns = readl_relaxed(ioaddr + PTP_STNSR); + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + } while (sec0 != sec1); + + if (systime) + *systime = ns + (sec1 * 1000000000ULL); +} + +static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) +{ + u64 ns; + + ns = readl(ptpaddr + PTP_ATNR); + ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC; + + *ptp_time = ns; +} + +static void timestamp_interrupt(struct stmmac_priv *priv) +{ + u32 num_snapshot, ts_status, tsync_int; + struct ptp_clock_event event; + unsigned long flags; + u64 ptp_time; + int i; + + if (priv->plat->int_snapshot_en) { + wake_up(&priv->tstamp_busy_wait); + return; + } + + tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE; + + if (!tsync_int) + return; + + /* Read timestamp status to clear interrupt from either external + * timestamp or start/end of PPS. 
+ */ + ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); + + if (!priv->plat->ext_snapshot_en) + return; + + num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + get_ptptime(priv->ptpaddr, &ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + +const struct stmmac_hwtimestamp stmmac_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, + .get_ptptime = get_ptptime, + .timestamp_interrupt = timestamp_interrupt, +}; diff --git a/devices/stmmac/stmmac_hwtstamp-6.1-orig.c b/devices/stmmac/stmmac_hwtstamp-6.1-orig.c new file mode 100644 index 00000000..8b50f030 --- /dev/null +++ b/devices/stmmac/stmmac_hwtstamp-6.1-orig.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This implements all the API for managing HW timestamp & PTP. + + + Author: Rayagond Kokatanur + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include "common.h" +#include "stmmac_ptp.h" +#include "dwmac4.h" +#include "stmmac.h" + +static void config_hw_tstamping(void __iomem *ioaddr, u32 data) +{ + writel(data, ioaddr + PTP_TCR); +} + +static void config_sub_second_increment(void __iomem *ioaddr, + u32 ptp_clock, int gmac4, u32 *ssinc) +{ + u32 value = readl(ioaddr + PTP_TCR); + unsigned long data; + u32 reg_value; + + /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second + * increment to twice the number of nanoseconds of a clock cycle. + * The calculation of the default_addend value by the caller will set it + * to mid-range = 2^31 when the remainder of this division is zero, + * which will make the accumulator overflow once every 2 ptp_clock + * cycles, adding twice the number of nanoseconds of a clock cycle : + * 2000000000ULL / ptp_clock. 
+ */ + if (value & PTP_TCR_TSCFUPDT) + data = (2000000000ULL / ptp_clock); + else + data = (1000000000ULL / ptp_clock); + + /* 0.465ns accuracy */ + if (!(value & PTP_TCR_TSCTRLSSR)) + data = (data * 1000) / 465; + + if (data > PTP_SSIR_SSINC_MAX) + data = PTP_SSIR_SSINC_MAX; + + reg_value = data; + if (gmac4) + reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT; + + writel(reg_value, ioaddr + PTP_SSIR); + + if (ssinc) + *ssinc = data; +} + +static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +{ + u32 value; + + writel(sec, ioaddr + PTP_STSUR); + writel(nsec, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSINIT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time initialize to complete */ + return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value, + !(value & PTP_TCR_TSINIT), + 10, 100000); +} + +static int config_addend(void __iomem *ioaddr, u32 addend) +{ + u32 value; + int limit; + + writel(addend, ioaddr + PTP_TAR); + /* issue command to update the addend value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSADDREG; + writel(value, ioaddr + PTP_TCR); + + /* wait for present addend update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4) +{ + u32 value; + int limit; + + if (add_sub) { + /* If the new sec value needs to be subtracted with + * the system time, then MAC_STSUR reg should be + * programmed with (2^32 – ) + */ + if (gmac4) + sec = -sec; + + value = readl(ioaddr + PTP_TCR); + if (value & PTP_TCR_TSCTRLSSR) + nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec); + else + nsec = (PTP_BINARY_ROLLOVER_MODE - nsec); + } + + writel(sec, ioaddr + PTP_STSUR); + value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec; + writel(value, ioaddr + PTP_STNSUR); + + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSUPDT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time adjust/update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static void get_systime(void __iomem *ioaddr, u64 *systime) +{ + u64 ns, sec0, sec1; + + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + do { + sec0 = sec1; + /* Get the TSSS value */ + ns = readl_relaxed(ioaddr + PTP_STNSR); + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + } while (sec0 != sec1); + + if (systime) + *systime = ns + (sec1 * 1000000000ULL); +} + +static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) +{ + u64 ns; + + ns = readl(ptpaddr + PTP_ATNR); + ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC; + + *ptp_time = ns; +} + +static void timestamp_interrupt(struct stmmac_priv *priv) +{ + u32 num_snapshot, ts_status, tsync_int; + struct ptp_clock_event event; + unsigned long flags; + u64 ptp_time; + int i; + + if (priv->plat->int_snapshot_en) { + wake_up(&priv->tstamp_busy_wait); + return; + } + + tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE; + + if (!tsync_int) + return; + + /* Read timestamp status to clear interrupt from either external + * timestamp or start/end of PPS. 
+ */ + ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); + + if (!priv->plat->ext_snapshot_en) + return; + + num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + get_ptptime(priv->ptpaddr, &ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + +const struct stmmac_hwtimestamp stmmac_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, + .get_ptptime = get_ptptime, + .timestamp_interrupt = timestamp_interrupt, +}; diff --git a/devices/stmmac/stmmac_main-6.1-ethercat.c b/devices/stmmac/stmmac_main-6.1-ethercat.c new file mode 100644 index 00000000..bd6eea00 --- /dev/null +++ b/devices/stmmac/stmmac_main-6.1-ethercat.c @@ -0,0 +1,7841 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. + ST Ethernet IPs are built around a Synopsys IP Core. + + Copyright(C) 2007-2011 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro + + Documentation available at: + http://www.stlinux.com + Support available at: + https://bugzilla.stlinux.com/ +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DEBUG_FS +#include +#include +#endif /* CONFIG_DEBUG_FS */ +#include +#include +#include +#include +#include +#include +#include "stmmac_ptp-6.1-ethercat.h" +#include "stmmac-6.1-ethercat.h" +#include "stmmac_xdp-6.1-ethercat.h" +#include +#include +#include "dwmac1000-6.1-ethercat.h" +#include "dwxgmac2-6.1-ethercat.h" +#include "hwif-6.1-ethercat.h" + +static inline void ec_txq_trans_cond_update(struct netdev_queue *txq) +{ + unsigned long now = jiffies; + + if (READ_ONCE(txq->trans_start) != now) + WRITE_ONCE(txq->trans_start, now); +} + +/* As long as the interface is active, we keep the timestamping counter enabled + * with fine resolution and binary rollover. This avoid non-monotonic behavior + * (clock jumps) when changing timestamping settings at runtime. 
+ */ +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSCTRLSSR) + +#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) +#define TSO_MAX_BUFF_SIZE (SZ_16K - 1) + +/* Module parameters */ +#define TX_TIMEO 5000 +static int watchdog = TX_TIMEO; +module_param(watchdog, int, 0644); +MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); + +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +static int phyaddr = -1; +module_param(phyaddr, int, 0444); +MODULE_PARM_DESC(phyaddr, "Physical device address"); + +#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) +#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) + +/* Limit to make sure XDP TX and slow path can coexist */ +#define STMMAC_XSK_TX_BUDGET_MAX 256 +#define STMMAC_TX_XSK_AVAIL 16 +#define STMMAC_RX_FILL_BATCH 16 + +#define STMMAC_XDP_PASS 0 +#define STMMAC_XDP_CONSUMED BIT(0) +#define STMMAC_XDP_TX BIT(1) +#define STMMAC_XDP_REDIRECT BIT(2) + +static int flow_ctrl = FLOW_AUTO; +module_param(flow_ctrl, int, 0644); +MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); + +static int pause = PAUSE_TIME; +module_param(pause, int, 0644); +MODULE_PARM_DESC(pause, "Flow Control Pause Time"); + +#define TC_DEFAULT 64 +static int tc = TC_DEFAULT; +module_param(tc, int, 0644); +MODULE_PARM_DESC(tc, "DMA threshold control value"); + +#define DEFAULT_BUFSIZE 1536 +static int buf_sz = DEFAULT_BUFSIZE; +module_param(buf_sz, int, 0644); +MODULE_PARM_DESC(buf_sz, "DMA buffer size"); + +#define STMMAC_RX_COPYBREAK 256 + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); + +#define STMMAC_DEFAULT_LPI_TIMER 1000 +static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; +module_param(eee_timer, int, 0644); +MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); +#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) + +/* By default the driver will use the ring mode to manage tx and rx descriptors, + * but allow user to force to use the chain instead of the ring + */ +static unsigned int chain_mode; +module_param(chain_mode, int, 0444); +MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); + +static irqreturn_t stmmac_interrupt(int irq, void *dev_id); +/* For MSI interrupts handling */ +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); +static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); +static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); +static void stmmac_reset_queues_param(struct stmmac_priv *priv); +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, + u32 rxmode, u32 chan); + +#ifdef CONFIG_DEBUG_FS +static const struct net_device_ops stmmac_netdev_ops; +static void stmmac_init_fs(struct net_device *dev); +static void stmmac_exit_fs(struct net_device *dev); +#endif + +#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) + +int ec_stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) +{ + int ret = 0; + + if (enabled) { + ret = 
clk_prepare_enable(priv->plat->stmmac_clk); + if (ret) + return ret; + ret = clk_prepare_enable(priv->plat->pclk); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + return ret; + } + if (priv->plat->clks_config) { + ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + return ret; + } + } + } else { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + if (priv->plat->clks_config) + priv->plat->clks_config(priv->plat->bsp_priv, enabled); + } + + return ret; +} + +/** + * stmmac_verify_args - verify the driver parameters. + * Description: it checks the driver parameters and set a default in case of + * errors. + */ +static void stmmac_verify_args(void) +{ + if (unlikely(watchdog < 0)) + watchdog = TX_TIMEO; + if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) + buf_sz = DEFAULT_BUFSIZE; + if (unlikely(flow_ctrl > 1)) + flow_ctrl = FLOW_AUTO; + else if (likely(flow_ctrl < 0)) + flow_ctrl = FLOW_OFF; + if (unlikely((pause < 0) || (pause > 0xffff))) + pause = PAUSE_TIME; + if (eee_timer < 0) + eee_timer = STMMAC_DEFAULT_LPI_TIMER; +} + +static void __stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); + u32 queue; + + if (get_ecdev(priv)) + return; + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_disable(&ch->rxtx_napi); + continue; + } + + if (queue < rx_queues_cnt) + napi_disable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_disable(&ch->tx_napi); + } +} + +/** + * stmmac_disable_all_queues - Disable all queues + * @priv: driver private structure + */ +static void stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + struct stmmac_rx_queue *rx_q; + u32 queue; + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + for (queue = 0; queue < rx_queues_cnt; queue++) { + rx_q = &priv->dma_conf.rx_queue[queue]; + if (rx_q->xsk_pool) { + synchronize_rcu(); + break; + } + } + if (get_ecdev(priv)) + return; + + __stmmac_disable_all_queues(priv); +} + +/** + * stmmac_enable_all_queues - Enable all queues + * @priv: driver private structure + */ +static void stmmac_enable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); + u32 queue; + + if (get_ecdev(priv)) + return; + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_enable(&ch->rxtx_napi); + continue; + } + + if (queue < rx_queues_cnt) + napi_enable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_enable(&ch->tx_napi); + } +} + +static void stmmac_service_event_schedule(struct stmmac_priv *priv) +{ + if (!test_bit(STMMAC_DOWN, &priv->state) && + !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) + queue_work(priv->wq, &priv->service_task); +} + +static void stmmac_global_err(struct stmmac_priv *priv) +{ + if (get_ecdev(priv)) { + ecdev_set_link(get_ecdev(priv), 0); + } else { + netif_carrier_off(priv->dev); + } + 
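ec_stmmac_bus_clks_config() above enables the bus clocks in order and, if a later step fails, disables whatever was already enabled before returning the error, so no clock is left half-configured. A stripped-down sketch of that enable/unwind pattern with purely illustrative types; this is not the kernel clk API.

    /* Illustrative enable/unwind pattern; fake_clk and helpers are made up. */
    #include <stdio.h>

    struct fake_clk { const char *name; int enabled; int fail; };

    static int clk_enable_f(struct fake_clk *c)
    {
        if (c->fail)
            return -1;
        c->enabled = 1;
        return 0;
    }

    static void clk_disable_f(struct fake_clk *c) { c->enabled = 0; }

    static int bus_clks_config(struct fake_clk *stmmac_clk,
                               struct fake_clk *pclk, int enable)
    {
        int ret;

        if (enable) {
            ret = clk_enable_f(stmmac_clk);
            if (ret)
                return ret;
            ret = clk_enable_f(pclk);
            if (ret) {
                /* unwind the clock that was already enabled */
                clk_disable_f(stmmac_clk);
                return ret;
            }
        } else {
            clk_disable_f(stmmac_clk);
            clk_disable_f(pclk);
        }
        return 0;
    }

    int main(void)
    {
        struct fake_clk a = { "stmmac_clk", 0, 0 };
        struct fake_clk b = { "pclk", 0, 1 };   /* force the second step to fail */
        int ret = bus_clks_config(&a, &b, 1);

        /* prints ret=-1 stmmac_clk=0 pclk=0: nothing left half-enabled */
        printf("ret=%d %s=%d %s=%d\n", ret, a.name, a.enabled, b.name, b.enabled);
        return 0;
    }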
set_bit(STMMAC_RESET_REQUESTED, &priv->state); + stmmac_service_event_schedule(priv); +} + +/** + * stmmac_clk_csr_set - dynamically set the MDC clock + * @priv: driver private structure + * Description: this is to dynamically set the MDC clock according to the csr + * clock input. + * Note: + * If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be + * changed at run-time and it is fixed (as reported in the driver + * documentation). Viceversa the driver will try to set the MDC + * clock dynamically according to the actual clock input. + */ +static void stmmac_clk_csr_set(struct stmmac_priv *priv) +{ + u32 clk_rate; + + clk_rate = clk_get_rate(priv->plat->stmmac_clk); + + /* Platform provided default clk_csr would be assumed valid + * for all other cases except for the below mentioned ones. + * For values higher than the IEEE 802.3 specified frequency + * we can not estimate the proper divider as it is not known + * the frequency of clk_csr_i. So we do not change the default + * divider. + */ + if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { + if (clk_rate < CSR_F_35M) + priv->clk_csr = STMMAC_CSR_20_35M; + else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) + priv->clk_csr = STMMAC_CSR_35_60M; + else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) + priv->clk_csr = STMMAC_CSR_60_100M; + else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) + priv->clk_csr = STMMAC_CSR_100_150M; + else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) + priv->clk_csr = STMMAC_CSR_150_250M; + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) + priv->clk_csr = STMMAC_CSR_250_300M; + } + + if (priv->plat->has_sun8i) { + if (clk_rate > 160000000) + priv->clk_csr = 0x03; + else if (clk_rate > 80000000) + priv->clk_csr = 0x02; + else if (clk_rate > 40000000) + priv->clk_csr = 0x01; + else + priv->clk_csr = 0; + } + + if (priv->plat->has_xgmac) { + if (clk_rate > 400000000) + priv->clk_csr = 0x5; + else if (clk_rate > 350000000) + priv->clk_csr = 0x4; + else if (clk_rate > 300000000) + priv->clk_csr = 0x3; + else if (clk_rate > 250000000) + priv->clk_csr = 0x2; + else if (clk_rate > 150000000) + priv->clk_csr = 0x1; + else + priv->clk_csr = 0x0; + } +} + +static void print_pkt(unsigned char *buf, int len) +{ + pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); +} + +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + u32 avail; + + if (tx_q->dirty_tx > tx_q->cur_tx) + avail = tx_q->dirty_tx - tx_q->cur_tx - 1; + else + avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; + + return avail; +} + +/** + * stmmac_rx_dirty - Get RX queue dirty + * @priv: driver private structure + * @queue: RX queue index + */ +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + u32 dirty; + + if (rx_q->dirty_rx <= rx_q->cur_rx) + dirty = rx_q->cur_rx - rx_q->dirty_rx; + else + dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; + + return dirty; +} + +static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) +{ + int tx_lpi_timer; + + /* Clear/set the SW EEE timer flag based on LPI ET enablement */ + priv->eee_sw_timer_en = en ? 0 : 1; + tx_lpi_timer = en ? 
priv->tx_lpi_timer : 0; + stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); +} + +/** + * stmmac_enable_eee_mode - check and enter in LPI mode + * @priv: driver private structure + * Description: this function is to verify and enter in LPI mode in case of + * EEE. + */ +static int stmmac_enable_eee_mode(struct stmmac_priv *priv) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + /* check if all TX queues have the work finished */ + for (queue = 0; queue < tx_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + + if (tx_q->dirty_tx != tx_q->cur_tx) + return -EBUSY; /* still unfinished work */ + } + + /* Check and enter in LPI mode */ + if (!priv->tx_path_in_lpi_mode) + stmmac_set_eee_mode(priv, priv->hw, + priv->plat->en_tx_lpi_clockgating); + return 0; +} + +/** + * stmmac_disable_eee_mode - disable and exit from LPI mode + * @priv: driver private structure + * Description: this function is to exit and disable EEE in case of + * LPI state is true. This is called by the xmit. + */ +void stmmac_disable_eee_mode(struct stmmac_priv *priv) +{ + if (!priv->eee_sw_timer_en) { + stmmac_lpi_entry_timer_config(priv, 0); + return; + } + + stmmac_reset_eee_mode(priv, priv->hw); + del_timer_sync(&priv->eee_ctrl_timer); + priv->tx_path_in_lpi_mode = false; +} + +/** + * stmmac_eee_ctrl_timer - EEE TX SW timer. + * @t: timer_list struct containing private info + * Description: + * if there is no data transfer and if we are not in LPI state, + * then MAC Transmitter can be moved to LPI state. + */ +static void stmmac_eee_ctrl_timer(struct timer_list *t) +{ + struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); + + if (stmmac_enable_eee_mode(priv)) + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); +} + +/** + * stmmac_eee_init - init EEE + * @priv: driver private structure + * Description: + * if the GMAC supports the EEE (from the HW cap reg) and the phy device + * can also manage EEE, this function enable the LPI state and start related + * timer. + */ +bool stmmac_eee_init(struct stmmac_priv *priv) +{ + int eee_tw_timer = priv->eee_tw_timer; + + /* Using PCS we cannot dial with the phy registers at this stage + * so we do not support extra feature like EEE. + */ + if (priv->hw->pcs == STMMAC_PCS_TBI || + priv->hw->pcs == STMMAC_PCS_RTBI) + return false; + + /* Check if MAC core supports the EEE feature. 
*/ + if (!priv->dma_cap.eee) + return false; + + mutex_lock(&priv->lock); + + /* Check if it needs to be deactivated */ + if (!priv->eee_active) { + if (priv->eee_enabled) { + netdev_dbg(priv->dev, "disable EEE\n"); + stmmac_lpi_entry_timer_config(priv, 0); + del_timer_sync(&priv->eee_ctrl_timer); + stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); + if (priv->hw->xpcs) + xpcs_config_eee(priv->hw->xpcs, + priv->plat->mult_fact_100ns, + false); + } + mutex_unlock(&priv->lock); + return false; + } + + if (priv->eee_active && !priv->eee_enabled) { + timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); + stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, + eee_tw_timer); + if (priv->hw->xpcs) + xpcs_config_eee(priv->hw->xpcs, + priv->plat->mult_fact_100ns, + true); + } + + if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { + del_timer_sync(&priv->eee_ctrl_timer); + priv->tx_path_in_lpi_mode = false; + stmmac_lpi_entry_timer_config(priv, 1); + } else { + stmmac_lpi_entry_timer_config(priv, 0); + mod_timer(&priv->eee_ctrl_timer, + STMMAC_LPI_T(priv->tx_lpi_timer)); + } + + mutex_unlock(&priv->lock); + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); + return true; +} + +/* stmmac_get_tx_hwtstamp - get HW TX timestamps + * @priv: driver private structure + * @p : descriptor pointer + * @skb : the socket buffer + * Description : + * This function will read timestamp from the descriptor & pass it to stack. + * and also perform some sanity checks. + */ +static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, + struct dma_desc *p, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps shhwtstamp; + bool found = false; + u64 ns = 0; + + if (!priv->hwts_tx_en) + return; + + /* exit if skb doesn't support hw tstamp */ + if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) + return; + + /* check tx tstamp status */ + if (stmmac_get_tx_timestamp_status(priv, p)) { + stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); + found = true; + } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { + found = true; + } + + if (found) { + ns -= priv->plat->cdc_error_adj; + + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns); + + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); + /* pass tstamp to stack */ + skb_tstamp_tx(skb, &shhwtstamp); + } +} + +/* stmmac_get_rx_hwtstamp - get HW RX timestamps + * @priv: driver private structure + * @p : descriptor pointer + * @np : next descriptor pointer + * @skb : the socket buffer + * Description : + * This function will read received packet's timestamp from the descriptor + * and pass it to stack. It also perform some sanity checks. + */ +static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, + struct dma_desc *np, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamp = NULL; + struct dma_desc *desc = p; + u64 ns = 0; + + if (!priv->hwts_rx_en) + return; + /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ + if (priv->plat->has_gmac4 || priv->plat->has_xgmac) + desc = np; + + /* Check if timestamp is available */ + if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { + stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); + + ns -= priv->plat->cdc_error_adj; + + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); + shhwtstamp = skb_hwtstamps(skb); + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); + } else { + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); + } +} + +/** + * stmmac_hwtstamp_set - control hardware timestamping. + * @dev: device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * Description: + * This function configures the MAC to enable/disable both outgoing(TX) + * and incoming(RX) packets time stamping based on user input. + * Return Value: + * 0 on success and an appropriate -ve integer on failure. + */ +static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + u32 ptp_v2 = 0; + u32 tstamp_all = 0; + u32 ptp_over_ipv4_udp = 0; + u32 ptp_over_ipv6_udp = 0; + u32 ptp_over_ethernet = 0; + u32 snap_type_sel = 0; + u32 ts_master_en = 0; + u32 ts_event_en = 0; + + if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { + netdev_alert(priv->dev, "No support for HW time stamping\n"); + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return -EOPNOTSUPP; + } + + if (copy_from_user(&config, ifr->ifr_data, + sizeof(config))) + return -EFAULT; + + netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + if (priv->adv_ts) { + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* time stamp no incoming packet at all */ + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* 'xmac' hardware can support Sync, Pdelay_Req and + * Pdelay_resp by setting bit14 and bits17/16 to 01 + * This leaves Delay_Req timestamps out. 
+ * Enable all events *and* general purpose message + * timestamping + */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + /* PTP v1, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + /* PTP v1, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + /* PTP v2, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* PTP v2, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* PTP v2, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + /* PTP v2/802.AS1 any layer, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->synopsys_id < DWMAC_CORE_4_10) + ts_event_en = PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* PTP v2/802.AS1, any layer, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* PTP v2/802.AS1, any layer, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* time stamp any incoming packet */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_all = PTP_TCR_TSENALL; + break; + + default: + return -ERANGE; + } + } else { + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + default: + /* PTP v1, UDP, any 
kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + } + } + priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; + + priv->systime_flags = STMMAC_HWTS_ACTIVE; + + if (priv->hwts_tx_en || priv->hwts_rx_en) { + priv->systime_flags |= tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel; + } + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + + memcpy(&priv->tstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +/** + * stmmac_hwtstamp_get - read hardware timestamping. + * @dev: device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * Description: + * This function obtain the current hardware timestamping settings + * as requested. + */ +static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct hwtstamp_config *config = &priv->tstamp_config; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * stmmac_init_tstamp_counter - init hardware timestamping counter + * @priv: driver private structure + * @systime_flags: timestamping flags + * Description: + * Initialize hardware counter for packet timestamping. + * This is valid as long as the interface is open and not suspended. + * Will be rerun after resuming from suspend, case in which the timestamping + * flags updated by stmmac_hwtstamp_set() also need to be restored. + */ +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + struct timespec64 now; + u32 sec_inc = 0; + u64 temp = 0; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); + priv->systime_flags = systime_flags; + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} + +/** + * stmmac_init_ptp - init PTP + * @priv: driver private structure + * Description: this is to verify if the HW supports the PTPv1 or PTPv2. + * This is done by looking at the HW cap. register. + * This function also registers the ptp driver. 
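The addend computation in stmmac_init_tstamp_counter() above condenses to one formula. A self-contained restatement, using purely hypothetical example clock numbers:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as stmmac_init_tstamp_counter(): the addend is added to a
 * 32-bit accumulator on every PTP clock cycle, sized so the accumulator
 * overflows (1e9 / sec_inc) times per second, each overflow advancing the
 * sub-second register by sec_inc nanoseconds. */
static uint64_t ptp_default_addend(uint32_t sec_inc_ns, uint64_t clk_ptp_rate_hz)
{
	uint64_t overflows_per_sec = 1000000000ULL / sec_inc_ns;

	return (overflows_per_sec << 32) / clk_ptp_rate_hz;
}

int main(void)
{
	/* Hypothetical 50 MHz PTP clock with a 40 ns sub-second increment:
	 * the result is 0x80000000. */
	printf("addend = 0x%llx\n",
	       (unsigned long long)ptp_default_addend(40, 50000000ULL));
	return 0;
}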
+ */ +static int stmmac_init_ptp(struct stmmac_priv *priv) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + int ret; + + if (priv->plat->ptp_clk_freq_config) + priv->plat->ptp_clk_freq_config(priv); + + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + if (ret) + return ret; + + priv->adv_ts = 0; + /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ + if (xmac && priv->dma_cap.atime_stamp) + priv->adv_ts = 1; + /* Dwmac 3.x core with extend_desc can support adv_ts */ + else if (priv->extend_desc && priv->dma_cap.atime_stamp) + priv->adv_ts = 1; + + if (priv->dma_cap.time_stamp) + netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); + + if (priv->adv_ts) + netdev_info(priv->dev, + "IEEE 1588-2008 Advanced Timestamp supported\n"); + + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return 0; +} + +static void stmmac_release_ptp(struct stmmac_priv *priv) +{ + clk_disable_unprepare(priv->plat->clk_ptp_ref); + stmmac_ptp_unregister(priv); +} + +/** + * stmmac_mac_flow_ctrl - Configure flow control in all queues + * @priv: driver private structure + * @duplex: duplex passed to the next function + * Description: It is used for configuring the flow control in all queues + */ +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + + stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, + priv->pause, tx_cnt); +} + +static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + if (!priv->hw->xpcs) + return NULL; + + return &priv->hw->xpcs->pcs; +} + +static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + /* Nothing to do, xpcs_config() handles everything */ +} + +static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (is_up && *hs_enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, + MPACKET_VERIFY); + } else { + *lo_state = FPE_STATE_OFF; + *lp_state = FPE_STATE_OFF; + } +} + +static void stmmac_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + if (get_ecdev(priv)) { + ecdev_set_link(get_ecdev(priv), false); + } + stmmac_mac_set(priv, priv->ioaddr, false); + priv->eee_active = false; + priv->tx_lpi_enabled = false; + priv->eee_enabled = stmmac_eee_init(priv); + stmmac_set_eee_pls(priv, priv->hw, false); + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, false); +} + +static void stmmac_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + u32 old_ctrl, ctrl; + + old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); + ctrl = old_ctrl & ~priv->hw->link.speed_mask; + + if (interface == PHY_INTERFACE_MODE_USXGMII) { + switch (speed) { + case SPEED_10000: + ctrl |= priv->hw->link.xgmii.speed10000; + break; + case SPEED_5000: + ctrl |= priv->hw->link.xgmii.speed5000; + break; + case SPEED_2500: + ctrl |= 
priv->hw->link.xgmii.speed2500; + break; + default: + return; + } + } else if (interface == PHY_INTERFACE_MODE_XLGMII) { + switch (speed) { + case SPEED_100000: + ctrl |= priv->hw->link.xlgmii.speed100000; + break; + case SPEED_50000: + ctrl |= priv->hw->link.xlgmii.speed50000; + break; + case SPEED_40000: + ctrl |= priv->hw->link.xlgmii.speed40000; + break; + case SPEED_25000: + ctrl |= priv->hw->link.xlgmii.speed25000; + break; + case SPEED_10000: + ctrl |= priv->hw->link.xgmii.speed10000; + break; + case SPEED_2500: + ctrl |= priv->hw->link.speed2500; + break; + case SPEED_1000: + ctrl |= priv->hw->link.speed1000; + break; + default: + return; + } + } else { + switch (speed) { + case SPEED_2500: + ctrl |= priv->hw->link.speed2500; + break; + case SPEED_1000: + ctrl |= priv->hw->link.speed1000; + break; + case SPEED_100: + ctrl |= priv->hw->link.speed100; + break; + case SPEED_10: + ctrl |= priv->hw->link.speed10; + break; + default: + return; + } + } + + priv->speed = speed; + + if (priv->plat->fix_mac_speed) + priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); + + if (!duplex) + ctrl &= ~priv->hw->link.duplex; + else + ctrl |= priv->hw->link.duplex; + + /* Flow Control operation */ + if (rx_pause && tx_pause) + priv->flow_ctrl = FLOW_AUTO; + else if (rx_pause && !tx_pause) + priv->flow_ctrl = FLOW_RX; + else if (!rx_pause && tx_pause) + priv->flow_ctrl = FLOW_TX; + else + priv->flow_ctrl = FLOW_OFF; + + stmmac_mac_flow_ctrl(priv, duplex); + + if (ctrl != old_ctrl) + writel(ctrl, priv->ioaddr + MAC_CTRL_REG); + + stmmac_mac_set(priv, priv->ioaddr, true); + if (phy && priv->dma_cap.eee) { + priv->eee_active = + phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0; + priv->eee_enabled = stmmac_eee_init(priv); + priv->tx_lpi_enabled = priv->eee_enabled; + stmmac_set_eee_pls(priv, priv->hw, true); + } + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, true); + if (get_ecdev(priv)) { + ecdev_set_link(get_ecdev(priv), true); + } +} + +static const struct phylink_mac_ops stmmac_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = stmmac_mac_select_pcs, + .mac_config = stmmac_mac_config, + .mac_link_down = stmmac_mac_link_down, + .mac_link_up = stmmac_mac_link_up, +}; + +/** + * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported + * @priv: driver private structure + * Description: this is to verify if the HW supports the PCS. + * Physical Coding Sublayer (PCS) interface that can be used when the MAC is + * configured for the TBI, RTBI, or SGMII PHY interface. + */ +static void stmmac_check_pcs_mode(struct stmmac_priv *priv) +{ + int interface = priv->plat->interface; + + if (priv->dma_cap.pcs) { + if ((interface == PHY_INTERFACE_MODE_RGMII) || + (interface == PHY_INTERFACE_MODE_RGMII_ID) || + (interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { + netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_RGMII; + } else if (interface == PHY_INTERFACE_MODE_SGMII) { + netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_SGMII; + } + } +} + +/** + * stmmac_init_phy - PHY initialization + * @dev: net device structure + * Description: it initializes the driver's PHY state, and attaches the PHY + * to the mac driver. 
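The pause-to-flow-control mapping inside stmmac_mac_link_up() above is a small lookup worth seeing on its own. A sketch with a local enum standing in for the driver's FLOW_* defines:

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the driver's FLOW_OFF/FLOW_RX/FLOW_TX/FLOW_AUTO. */
enum flow_ctrl { FLOW_OFF, FLOW_RX, FLOW_TX, FLOW_AUTO };

/* Mirrors the pause resolution step in stmmac_mac_link_up(). */
static enum flow_ctrl resolve_flow_ctrl(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return FLOW_AUTO;
	if (rx_pause)
		return FLOW_RX;
	if (tx_pause)
		return FLOW_TX;
	return FLOW_OFF;
}

int main(void)
{
	printf("rx+tx pause -> %d, rx only -> %d\n",
	       resolve_flow_ctrl(true, true), resolve_flow_ctrl(true, false));
	return 0;
}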
+ * Return value: + * 0 on success + */ +static int stmmac_init_phy(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct fwnode_handle *phy_fwnode; + struct fwnode_handle *fwnode; + int ret; + + if (!phylink_expects_phy(priv->phylink)) + return 0; + + fwnode = of_fwnode_handle(priv->plat->phylink_node); + if (!fwnode) + fwnode = dev_fwnode(priv->device); + + if (fwnode) + phy_fwnode = fwnode_get_phy_node(fwnode); + else + phy_fwnode = NULL; + + /* Some DT bindings do not set-up the PHY handle. Let's try to + * manually parse it + */ + if (!phy_fwnode || IS_ERR(phy_fwnode)) { + int addr = priv->plat->phy_addr; + struct phy_device *phydev; + + if (addr < 0) { + netdev_err(priv->dev, "no phy found\n"); + return -ENODEV; + } + + phydev = mdiobus_get_phy(priv->mii, addr); + if (!phydev) { + netdev_err(priv->dev, "no phy at addr %d\n", addr); + return -ENODEV; + } + + ret = phylink_connect_phy(priv->phylink, phydev); + } else { + fwnode_handle_put(phy_fwnode); + ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); + } + + if (!get_ecdev(priv) && !priv->plat->pmt) { + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + + phylink_ethtool_get_wol(priv->phylink, &wol); + device_set_wakeup_capable(priv->device, !!wol.supported); + device_set_wakeup_enable(priv->device, !!wol.wolopts); + } + + return ret; +} + +static int stmmac_phy_setup(struct stmmac_priv *priv) +{ + struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; + struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); + int max_speed = priv->plat->max_speed; + int mode = priv->plat->phy_interface; + struct phylink *phylink; + + priv->phylink_config.dev = &priv->dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + if (priv->plat->mdio_bus_data) + priv->phylink_config.ovr_an_inband = + mdio_bus_data->xpcs_an_inband; + + if (!fwnode) + fwnode = dev_fwnode(priv->device); + + /* Set the platform/firmware specified interface mode */ + __set_bit(mode, priv->phylink_config.supported_interfaces); + + /* If we have an xpcs, it defines which PHY interfaces are supported. 
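stmmac_init_phy() above tries the firmware-described PHY first and only then falls back to the platform phy_addr. A schematic of that lookup order; the enum and helper names are invented for illustration:

#include <stdio.h>

enum phy_source { PHY_FROM_FWNODE, PHY_FROM_MDIO_ADDR, PHY_NONE };

/* Schematic of the fallback order in stmmac_init_phy(): a PHY node from the
 * firmware description wins; otherwise a non-negative platform phy_addr is
 * looked up on the MDIO bus; otherwise probing fails with -ENODEV. */
static enum phy_source pick_phy_source(int fwnode_has_phy, int plat_phy_addr)
{
	if (fwnode_has_phy)
		return PHY_FROM_FWNODE;      /* phylink_fwnode_phy_connect() */
	if (plat_phy_addr >= 0)
		return PHY_FROM_MDIO_ADDR;   /* mdiobus_get_phy() + phylink_connect_phy() */
	return PHY_NONE;
}

int main(void)
{
	printf("%d %d %d\n", pick_phy_source(1, -1), pick_phy_source(0, 3),
	       pick_phy_source(0, -1));
	return 0;
}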
*/ + if (priv->hw->xpcs) + xpcs_get_interfaces(priv->hw->xpcs, + priv->phylink_config.supported_interfaces); + + priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100; + + if (!max_speed || max_speed >= 1000) + priv->phylink_config.mac_capabilities |= MAC_1000; + + if (priv->plat->has_gmac4) { + if (!max_speed || max_speed >= 2500) + priv->phylink_config.mac_capabilities |= MAC_2500FD; + } else if (priv->plat->has_xgmac) { + if (!max_speed || max_speed >= 2500) + priv->phylink_config.mac_capabilities |= MAC_2500FD; + if (!max_speed || max_speed >= 5000) + priv->phylink_config.mac_capabilities |= MAC_5000FD; + if (!max_speed || max_speed >= 10000) + priv->phylink_config.mac_capabilities |= MAC_10000FD; + if (!max_speed || max_speed >= 25000) + priv->phylink_config.mac_capabilities |= MAC_25000FD; + if (!max_speed || max_speed >= 40000) + priv->phylink_config.mac_capabilities |= MAC_40000FD; + if (!max_speed || max_speed >= 50000) + priv->phylink_config.mac_capabilities |= MAC_50000FD; + if (!max_speed || max_speed >= 100000) + priv->phylink_config.mac_capabilities |= MAC_100000FD; + } + + /* Half-Duplex can only work with single queue */ + if (priv->plat->tx_queues_to_use > 1) + priv->phylink_config.mac_capabilities &= + ~(MAC_10HD | MAC_100HD | MAC_1000HD); + priv->phylink_config.mac_managed_pm = true; + + phylink = phylink_create(&priv->phylink_config, fwnode, + mode, &stmmac_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + priv->phylink = phylink; + return 0; +} + +static void stmmac_display_rx_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_cnt = priv->plat->rx_queues_to_use; + unsigned int desc_size; + void *head_rx; + u32 queue; + + /* Display RX rings */ + for (queue = 0; queue < rx_cnt; queue++) { + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + pr_info("\tRX Queue %u rings\n", queue); + + if (priv->extend_desc) { + head_rx = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + head_rx = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + /* Display RX ring */ + stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } +} + +static void stmmac_display_tx_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + unsigned int desc_size; + void *head_tx; + u32 queue; + + /* Display TX rings */ + for (queue = 0; queue < tx_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + + pr_info("\tTX Queue %d rings\n", queue); + + if (priv->extend_desc) { + head_tx = (void *)tx_q->dma_etx; + desc_size = sizeof(struct dma_extended_desc); + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { + head_tx = (void *)tx_q->dma_entx; + desc_size = sizeof(struct dma_edesc); + } else { + head_tx = (void *)tx_q->dma_tx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, + tx_q->dma_tx_phy, desc_size); + } +} + +static void stmmac_display_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + /* Display RX ring */ + stmmac_display_rx_rings(priv, dma_conf); + + /* Display TX ring */ + stmmac_display_tx_rings(priv, dma_conf); +} + +static int stmmac_set_bfsize(int mtu, int bufsize) +{ + int ret = bufsize; + + if (mtu >= BUF_SIZE_8KiB) + ret = BUF_SIZE_16KiB; + else if (mtu >= BUF_SIZE_4KiB) + ret = BUF_SIZE_8KiB; + else if (mtu >= BUF_SIZE_2KiB) + ret = 
BUF_SIZE_4KiB; + else if (mtu > DEFAULT_BUFSIZE) + ret = BUF_SIZE_2KiB; + else + ret = DEFAULT_BUFSIZE; + + return ret; +} + +/** + * stmmac_clear_rx_descriptors - clear RX descriptors + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + * Description: this function is called to clear the RX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + /* Clear the RX descriptors */ + for (i = 0; i < dma_conf->dma_rx_size; i++) + if (priv->extend_desc) + stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, + priv->use_riwt, priv->mode, + (i == dma_conf->dma_rx_size - 1), + dma_conf->dma_buf_sz); + else + stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], + priv->use_riwt, priv->mode, + (i == dma_conf->dma_rx_size - 1), + dma_conf->dma_buf_sz); +} + +/** + * stmmac_clear_tx_descriptors - clear tx descriptors + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index. + * Description: this function is called to clear the TX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + int i; + + /* Clear the TX descriptors */ + for (i = 0; i < dma_conf->dma_tx_size; i++) { + int last = (i == (dma_conf->dma_tx_size - 1)); + struct dma_desc *p; + + if (priv->extend_desc) + p = &tx_q->dma_etx[i].basic; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &tx_q->dma_entx[i].basic; + else + p = &tx_q->dma_tx[i]; + + stmmac_init_tx_desc(priv, p, priv->mode, last); + } +} + +/** + * stmmac_clear_descriptors - clear descriptors + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * Description: this function is called to clear the TX and RX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_queue_cnt = priv->plat->rx_queues_to_use; + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + /* Clear the RX descriptors */ + for (queue = 0; queue < rx_queue_cnt; queue++) + stmmac_clear_rx_descriptors(priv, dma_conf, queue); + + /* Clear the TX descriptors */ + for (queue = 0; queue < tx_queue_cnt; queue++) + stmmac_clear_tx_descriptors(priv, dma_conf, queue); +} + +/** + * stmmac_init_rx_buffers - init the RX descriptor buffer. + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @p: descriptor pointer + * @i: descriptor index + * @flags: gfp flag + * @queue: RX queue index + * Description: this function is called to allocate a receive buffer, perform + * the DMA mapping and init the descriptor. 
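stmmac_set_bfsize() above is a threshold ladder from MTU to DMA buffer size. A standalone sketch; the numeric values below are placeholders for the driver's BUF_SIZE_* and DEFAULT_BUFSIZE constants, not authoritative:

#include <stdio.h>

/* Placeholder values standing in for the driver's constants. */
#define BUFSZ_16K 16368
#define BUFSZ_8K   8188
#define BUFSZ_4K   4096
#define BUFSZ_2K   2048
#define BUFSZ_DEF  1536

/* Same ladder as stmmac_set_bfsize(): choose the next buffer size up from the
 * requested MTU, falling back to the default for standard frames. */
static int pick_bufsize(int mtu)
{
	if (mtu >= BUFSZ_8K)
		return BUFSZ_16K;
	if (mtu >= BUFSZ_4K)
		return BUFSZ_8K;
	if (mtu >= BUFSZ_2K)
		return BUFSZ_4K;
	if (mtu > BUFSZ_DEF)
		return BUFSZ_2K;
	return BUFSZ_DEF;
}

int main(void)
{
	printf("MTU 1500 -> %d, MTU 9000 -> %d\n",
	       pick_bufsize(1500), pick_bufsize(9000));
	return 0;
}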
+ */ +static int stmmac_init_rx_buffers(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + struct dma_desc *p, + int i, gfp_t flags, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + if (priv->dma_cap.host_dma_width <= 32) + gfp |= GFP_DMA32; + + if (!buf->page) { + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); + if (!buf->page) + return -ENOMEM; + buf->page_offset = stmmac_rx_offset(priv); + } + + if (priv->sph && !buf->sec_page) { + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); + if (!buf->sec_page) + return -ENOMEM; + + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); + } else { + buf->sec_page = NULL; + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); + } + + buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; + + stmmac_set_desc_addr(priv, p, buf->addr); + if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) + stmmac_init_desc3(priv, p); + + return 0; +} + +/** + * stmmac_free_rx_buffer - free RX dma buffers + * @priv: private structure + * @rx_q: RX queue + * @i: buffer index. + */ +static void stmmac_free_rx_buffer(struct stmmac_priv *priv, + struct stmmac_rx_queue *rx_q, + int i) +{ + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + + if (buf->page) + page_pool_put_full_page(rx_q->page_pool, buf->page, false); + buf->page = NULL; + + if (buf->sec_page) + page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); + buf->sec_page = NULL; +} + +/** + * stmmac_free_tx_buffer - free RX dma buffers + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + * @i: buffer index. 
+ */ +static void stmmac_free_tx_buffer(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, int i) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + + if (tx_q->tx_skbuff_dma[i].buf && + tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { + if (tx_q->tx_skbuff_dma[i].map_as_page) + dma_unmap_page(priv->device, + tx_q->tx_skbuff_dma[i].buf, + tx_q->tx_skbuff_dma[i].len, + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + tx_q->tx_skbuff_dma[i].buf, + tx_q->tx_skbuff_dma[i].len, + DMA_TO_DEVICE); + } + + if (tx_q->xdpf[i] && + (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { + xdp_return_frame(tx_q->xdpf[i]); + tx_q->xdpf[i] = NULL; + } + + if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (!get_ecdev(priv) && + tx_q->tx_skbuff[i] && + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { + dev_kfree_skb_any(tx_q->tx_skbuff[i]); + tx_q->tx_skbuff[i] = NULL; + } + + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; +} + +/** + * dma_free_rx_skbufs - free RX dma buffers + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + */ +static void dma_free_rx_skbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) + stmmac_free_rx_buffer(priv, rx_q, i); +} + +static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, gfp_t flags) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct dma_desc *p; + int ret; + + if (priv->extend_desc) + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; + + ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, + queue); + if (ret) + return ret; + + rx_q->buf_alloc_num++; + } + + return 0; +} + +/** + * dma_free_rx_xskbufs - free RX dma buffers from XSK pool + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + */ +static void dma_free_rx_xskbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + + if (!buf->xdp) + continue; + + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + } +} + +static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf; + dma_addr_t dma_addr; + struct dma_desc *p; + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + i); + else + p = rx_q->dma_rx + i; + + buf = &rx_q->buf_pool[i]; + + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) + return -ENOMEM; + + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, p, dma_addr); + rx_q->buf_alloc_num++; + } + + return 0; +} + +static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) +{ + if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) + return NULL; + + return xsk_get_pool_from_qid(priv->dev, queue); +} + +/** + * 
__init_dma_rx_desc_rings - init the RX descriptor ring (per queue) + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + * @flags: gfp flag. + * Description: this function initializes the DMA RX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. + */ +static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, gfp_t flags) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int ret; + + netif_dbg(priv, probe, priv->dev, + "(%s) dma_rx_phy=0x%08x\n", __func__, + (u32)rx_q->dma_rx_phy); + + stmmac_clear_rx_descriptors(priv, dma_conf, queue); + + xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); + + rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); + + if (rx_q->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + netdev_info(priv->dev, + "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", + rx_q->queue_index); + xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_PAGE_POOL, + rx_q->page_pool)); + netdev_info(priv->dev, + "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", + rx_q->queue_index); + } + + if (rx_q->xsk_pool) { + /* RX XDP ZC buffer pool may not be populated, e.g. + * xdpsock TX-only. + */ + stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); + } else { + ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); + if (ret < 0) + return -ENOMEM; + } + + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, rx_q->dma_erx, + rx_q->dma_rx_phy, + dma_conf->dma_rx_size, 1); + else + stmmac_mode_init(priv, rx_q->dma_rx, + rx_q->dma_rx_phy, + dma_conf->dma_rx_size, 0); + } + + return 0; +} + +static int init_dma_rx_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf, + gfp_t flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + int queue; + int ret; + + /* RX INITIALIZATION */ + netif_dbg(priv, probe, priv->dev, + "SKB addresses:\nskb\t\tskb data\tdma data\n"); + + for (queue = 0; queue < rx_count; queue++) { + ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); + if (ret) + goto err_init_rx_buffers; + } + + return 0; + +err_init_rx_buffers: + while (queue >= 0) { + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, dma_conf, queue); + else + dma_free_rx_skbufs(priv, dma_conf, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; + + queue--; + } + + return ret; +} + +/** + * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + * Description: this function initializes the DMA TX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. 
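The error path of init_dma_rx_desc_rings() above unwinds by counting the queue index back down, releasing the queue that failed part-way as well. A generic sketch of that pattern with fake init/release helpers:

#include <stdio.h>

static int init_queue(int q)     { return q == 3 ? -1 : 0; } /* fake failure at queue 3 */
static void release_queue(int q) { printf("release queue %d\n", q); }

/* Same shape as init_dma_rx_desc_rings(): on failure, walk back from the
 * failing queue (inclusive, since it may be partially initialized) to 0. */
static int init_all_queues(int count)
{
	int ret = 0, queue;

	for (queue = 0; queue < count; queue++) {
		ret = init_queue(queue);
		if (ret)
			goto err_unwind;
	}
	return 0;

err_unwind:
	while (queue >= 0) {
		release_queue(queue);
		queue--;
	}
	return ret;
}

int main(void)
{
	return init_all_queues(5) ? 1 : 0;
}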
+ */ +static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + int i; + + netif_dbg(priv, probe, priv->dev, + "(%s) dma_tx_phy=0x%08x\n", __func__, + (u32)tx_q->dma_tx_phy); + + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, + dma_conf->dma_tx_size, 1); + else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, + dma_conf->dma_tx_size, 0); + } + + tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); + + for (i = 0; i < dma_conf->dma_tx_size; i++) { + struct dma_desc *p; + + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &((tx_q->dma_entx + i)->basic); + else + p = tx_q->dma_tx + i; + + stmmac_clear_desc(priv, p); + + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; + tx_q->tx_skbuff_dma[i].len = 0; + tx_q->tx_skbuff_dma[i].last_segment = false; + tx_q->tx_skbuff[i] = NULL; + } + + return 0; +} + +static int init_dma_tx_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_queue_cnt; + u32 queue; + + tx_queue_cnt = priv->plat->tx_queues_to_use; + + for (queue = 0; queue < tx_queue_cnt; queue++) + __init_dma_tx_desc_rings(priv, dma_conf, queue); + + return 0; +} + +/** + * init_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * @dma_conf: structure to take the dma data + * @flags: gfp flag. + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. 
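__init_dma_tx_desc_rings() above, like every other ring walker in this file, picks one of three descriptor layouts per queue. A schematic of that selection with the driver's structs replaced by tags:

#include <stdio.h>

enum desc_flavor { DESC_EXTENDED, DESC_ENHANCED_TBS, DESC_BASIC };

/* Same three-way choice used throughout the TX paths:
 *   extend_desc   -> tx_q->dma_etx[i].basic
 *   TBS available -> tx_q->dma_entx[i].basic
 *   otherwise     -> tx_q->dma_tx[i] */
static enum desc_flavor pick_tx_desc_flavor(int extend_desc, int tbs_avail)
{
	if (extend_desc)
		return DESC_EXTENDED;
	if (tbs_avail)
		return DESC_ENHANCED_TBS;
	return DESC_BASIC;
}

int main(void)
{
	printf("%d %d %d\n", pick_tx_desc_flavor(1, 0), pick_tx_desc_flavor(0, 1),
	       pick_tx_desc_flavor(0, 0));
	return 0;
}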
+ */ +static int init_dma_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf, + gfp_t flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + ret = init_dma_rx_desc_rings(dev, dma_conf, flags); + if (ret) + return ret; + + ret = init_dma_tx_desc_rings(dev, dma_conf); + + stmmac_clear_descriptors(priv, dma_conf); + + if (netif_msg_hw(priv)) + stmmac_display_rings(priv, dma_conf); + + return ret; +} + +/** + * dma_free_tx_skbufs - free TX dma buffers + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + */ +static void dma_free_tx_skbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + int i; + + tx_q->xsk_frames_done = 0; + + for (i = 0; i < dma_conf->dma_tx_size; i++) + stmmac_free_tx_buffer(priv, dma_conf, queue, i); + + if (tx_q->xsk_pool && tx_q->xsk_frames_done) { + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + tx_q->xsk_frames_done = 0; + tx_q->xsk_pool = NULL; + } +} + +/** + * stmmac_free_tx_skbufs - free TX skb buffers + * @priv: private structure + */ +static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) +{ + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < tx_queue_cnt; queue++) + dma_free_tx_skbufs(priv, &priv->dma_conf, queue); +} + +/** + * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + */ +static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + /* Release the DMA RX socket buffers */ + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, dma_conf, queue); + else + dma_free_rx_skbufs(priv, dma_conf, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; + + /* Free DMA regions of consistent memory previously allocated */ + if (!priv->extend_desc) + dma_free_coherent(priv->device, dma_conf->dma_rx_size * + sizeof(struct dma_desc), + rx_q->dma_rx, rx_q->dma_rx_phy); + else + dma_free_coherent(priv->device, dma_conf->dma_rx_size * + sizeof(struct dma_extended_desc), + rx_q->dma_erx, rx_q->dma_rx_phy); + + if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) + xdp_rxq_info_unreg(&rx_q->xdp_rxq); + + kfree(rx_q->buf_pool); + if (rx_q->page_pool) + page_pool_destroy(rx_q->page_pool); +} + +static void free_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + + /* Free RX queue resources */ + for (queue = 0; queue < rx_count; queue++) + __free_dma_rx_desc_resources(priv, dma_conf, queue); +} + +/** + * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + */ +static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + size_t size; + void *addr; + + /* Release the DMA TX socket buffers */ + dma_free_tx_skbufs(priv, dma_conf, queue); + + if (priv->extend_desc) { + size = sizeof(struct dma_extended_desc); + addr = tx_q->dma_etx; + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { + size = sizeof(struct dma_edesc); + addr = tx_q->dma_entx; + } else { + size = sizeof(struct dma_desc); + 
addr = tx_q->dma_tx; + } + + size *= dma_conf->dma_tx_size; + + dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); + + kfree(tx_q->tx_skbuff_dma); + kfree(tx_q->tx_skbuff); + tx_q->tx_skbuff_dma = NULL; + tx_q->tx_skbuff = NULL; +} + +static void free_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + + /* Free TX queue resources */ + for (queue = 0; queue < tx_count; queue++) + __free_dma_tx_desc_resources(priv, dma_conf, queue); +} + +/** + * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + bool xdp_prog = stmmac_xdp_is_enabled(priv); + struct page_pool_params pp_params = { 0 }; + unsigned int num_pages; + unsigned int napi_id; + int ret; + + rx_q->queue_index = queue; + rx_q->priv_data = priv; + + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = dma_conf->dma_rx_size; + num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); + pp_params.nid = dev_to_node(priv->device); + pp_params.dev = priv->device; + pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + pp_params.offset = stmmac_rx_offset(priv); + pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); + + rx_q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_q->page_pool)) { + ret = PTR_ERR(rx_q->page_pool); + rx_q->page_pool = NULL; + return ret; + } + + rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, + sizeof(*rx_q->buf_pool), + GFP_KERNEL); + if (!rx_q->buf_pool) + return -ENOMEM; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_alloc_coherent(priv->device, + dma_conf->dma_rx_size * + sizeof(struct dma_extended_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_erx) + return -ENOMEM; + + } else { + rx_q->dma_rx = dma_alloc_coherent(priv->device, + dma_conf->dma_rx_size * + sizeof(struct dma_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_rx) + return -ENOMEM; + } + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) + napi_id = ch->rxtx_napi.napi_id; + else + napi_id = ch->rx_napi.napi_id; + + ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, + rx_q->queue_index, + napi_id); + if (ret) { + netdev_err(priv->dev, "Failed to register xdp rxq info\n"); + return -EINVAL; + } + + return 0; +} + +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int ret; + + /* RX queues buffers and DMA */ + for (queue = 0; queue < rx_count; queue++) { + ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); + if (ret) + goto err_dma; + } + + return 0; + +err_dma: + free_dma_rx_desc_resources(priv, dma_conf); + + return ret; +} + +/** + * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 
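__alloc_dma_rx_desc_resources() above sizes its page pool from the DMA buffer size. A standalone restatement of that computation, assuming 4 KiB pages for the example and a plain floor-log2 in place of the kernel's ilog2():

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u  /* assumption for this example only */

/* floor(log2(n)) for n >= 1, matching ilog2() for the values used here. */
static unsigned int floor_log2(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

/* Mirrors the page-pool sizing: enough pages must cover one RX buffer, and
 * the pool's page order is derived from that page count. */
static unsigned int rx_pool_order(unsigned int dma_buf_sz)
{
	unsigned int num_pages =
		(dma_buf_sz + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;

	return floor_log2(num_pages);
}

int main(void)
{
	printf("1536 B -> order %u, 8188 B -> order %u\n",
	       rx_pool_order(1536), rx_pool_order(8188));
	return 0;
}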
+ * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + size_t size; + void *addr; + + tx_q->queue_index = queue; + tx_q->priv_data = priv; + + tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, + sizeof(*tx_q->tx_skbuff_dma), + GFP_KERNEL); + if (!tx_q->tx_skbuff_dma) + return -ENOMEM; + + tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!tx_q->tx_skbuff) + return -ENOMEM; + + if (priv->extend_desc) + size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + size = sizeof(struct dma_edesc); + else + size = sizeof(struct dma_desc); + + size *= dma_conf->dma_tx_size; + + addr = dma_alloc_coherent(priv->device, size, + &tx_q->dma_tx_phy, GFP_KERNEL); + if (!addr) + return -ENOMEM; + + if (priv->extend_desc) + tx_q->dma_etx = addr; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_q->dma_entx = addr; + else + tx_q->dma_tx = addr; + + return 0; +} + +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + int ret; + + /* TX queues buffers and DMA */ + for (queue = 0; queue < tx_count; queue++) { + ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); + if (ret) + goto err_dma; + } + + return 0; + +err_dma: + free_dma_tx_desc_resources(priv, dma_conf); + return ret; +} + +/** + * alloc_dma_desc_resources - alloc TX/RX resources. + * @priv: private structure + * @dma_conf: structure to take the dma data + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +static int alloc_dma_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + /* RX Allocation */ + int ret = alloc_dma_rx_desc_resources(priv, dma_conf); + + if (ret) + return ret; + + ret = alloc_dma_tx_desc_resources(priv, dma_conf); + + return ret; +} + +/** + * free_dma_desc_resources - free dma desc resources + * @priv: private structure + * @dma_conf: structure to take the dma data + */ +static void free_dma_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + /* Release the DMA TX socket buffers */ + free_dma_tx_desc_resources(priv, dma_conf); + + /* Release the DMA RX socket buffers later + * to ensure all pending XDP_TX buffers are returned. 
+ */ + free_dma_rx_desc_resources(priv, dma_conf); +} + +/** + * stmmac_mac_enable_rx_queues - Enable MAC rx queues + * @priv: driver private structure + * Description: It is used for enabling the rx queues in the MAC + */ +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + int queue; + u8 mode; + + for (queue = 0; queue < rx_queues_count; queue++) { + mode = priv->plat->rx_queues_cfg[queue].mode_to_use; + stmmac_rx_queue_enable(priv, priv->hw, mode, queue); + } +} + +/** + * stmmac_start_rx_dma - start RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This starts a RX DMA channel + */ +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); + stmmac_start_rx(priv, priv->ioaddr, chan); +} + +/** + * stmmac_start_tx_dma - start TX DMA channel + * @priv: driver private structure + * @chan: TX channel index + * Description: + * This starts a TX DMA channel + */ +static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); + stmmac_start_tx(priv, priv->ioaddr, chan); +} + +/** + * stmmac_stop_rx_dma - stop RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This stops a RX DMA channel + */ +static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); + stmmac_stop_rx(priv, priv->ioaddr, chan); +} + +/** + * stmmac_stop_tx_dma - stop TX DMA channel + * @priv: driver private structure + * @chan: TX channel index + * Description: + * This stops a TX DMA channel + */ +static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); + stmmac_stop_tx(priv, priv->ioaddr, chan); +} + +static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); + u32 chan; + + if (get_ecdev(priv)) + return; + + for (chan = 0; chan < dma_csr_ch; chan++) { + struct stmmac_channel *ch = &priv->channel[chan]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } +} + +/** + * stmmac_start_all_dma - start all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This starts all the RX and TX DMA channels + */ +static void stmmac_start_all_dma(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; + + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_start_rx_dma(priv, chan); + + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_start_tx_dma(priv, chan); +} + +/** + * stmmac_stop_all_dma - stop all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This stops the RX and TX DMA channels + */ +static void stmmac_stop_all_dma(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; + + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_stop_rx_dma(priv, chan); + 
+ for (chan = 0; chan < tx_channels_count; chan++) + stmmac_stop_tx_dma(priv, chan); +} + +/** + * stmmac_dma_operation_mode - HW DMA operation mode + * @priv: driver private structure + * Description: it is used for configuring the DMA operation mode register in + * order to program the tx/rx DMA thresholds or Store-And-Forward mode. + */ +static void stmmac_dma_operation_mode(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + int rxfifosz = priv->plat->rx_fifo_size; + int txfifosz = priv->plat->tx_fifo_size; + u32 txmode = 0; + u32 rxmode = 0; + u32 chan = 0; + u8 qmode = 0; + + if (rxfifosz == 0) + rxfifosz = priv->dma_cap.rx_fifo_size; + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + /* Adjust for real per queue fifo size */ + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; + + if (priv->plat->force_thresh_dma_mode) { + txmode = tc; + rxmode = tc; + } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { + /* + * In case of GMAC, SF mode can be enabled + * to perform the TX COE in HW. This depends on: + * 1) TX COE if actually supported + * 2) There is no bugged Jumbo frame support + * that needs to not insert csum in the TDES. + */ + txmode = SF_DMA_MODE; + rxmode = SF_DMA_MODE; + priv->xstats.threshold = SF_DMA_MODE; + } else { + txmode = tc; + rxmode = SF_DMA_MODE; + } + + /* configure all channels */ + for (chan = 0; chan < rx_channels_count; chan++) { + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; + u32 buf_size; + + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, + rxfifosz, qmode); + + if (rx_q->xsk_pool) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + chan); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_conf.dma_buf_sz, + chan); + } + } + + for (chan = 0; chan < tx_channels_count; chan++) { + qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; + + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, + txfifosz, qmode); + } +} + +static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct xsk_buff_pool *pool = tx_q->xsk_pool; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc = NULL; + struct xdp_desc xdp_desc; + bool work_done = true; + + /* Avoids TX time-out as we are sharing with slow path */ + ec_txq_trans_cond_update(nq); + + budget = min(budget, stmmac_tx_avail(priv, queue)); + + while (budget-- > 0) { + dma_addr_t dma_addr; + bool set_ic; + + /* We are sharing with slow path and stop XSK TX desc submission when + * available TX ring is less than threshold. 
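stmmac_dma_operation_mode() above resolves the TX/RX operating modes from three platform knobs before splitting the FIFOs per queue. A sketch of just the mode selection; SF_SENTINEL and the threshold value are stand-ins for the driver's SF_DMA_MODE define and its global tc:

#include <stdbool.h>
#include <stdio.h>

#define SF_SENTINEL (-1)   /* stands in for SF_DMA_MODE */
static int tc = 64;        /* hypothetical threshold value */

/* Mirrors the selection in stmmac_dma_operation_mode(): forced threshold mode
 * wins, then store-and-forward on both paths when forced or when TX checksum
 * offload is in use, otherwise threshold TX with store-and-forward RX. */
static void pick_dma_modes(bool force_thresh, bool force_sf, bool tx_coe,
			   int *txmode, int *rxmode)
{
	if (force_thresh) {
		*txmode = tc;
		*rxmode = tc;
	} else if (force_sf || tx_coe) {
		*txmode = SF_SENTINEL;
		*rxmode = SF_SENTINEL;
	} else {
		*txmode = tc;
		*rxmode = SF_SENTINEL;
	}
}

int main(void)
{
	int txm, rxm;

	pick_dma_modes(false, false, true, &txm, &rxm);
	printf("tx_coe set: txmode=%d rxmode=%d\n", txm, rxm);
	return 0;
}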
+ */ + if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || + !netif_carrier_ok(priv->dev)) { + work_done = false; + break; + } + + if (!xsk_tx_peek_desc(pool, &xdp_desc)) + break; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = &tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; + + /* To return XDP buffer to XSK pool, we simple call + * xsk_tx_completed(), so we don't need to fill up + * 'buf' and 'xdpf'. + */ + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->xdpf[entry] = NULL; + + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + tx_q->tx_count_frames++; + + if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, + true, priv->mode, true, true, + xdp_desc.len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + entry = tx_q->cur_tx; + } + + if (tx_desc) { + stmmac_flush_tx_descriptors(priv, queue); + xsk_tx_release(pool); + } + + /* Return true if all of the 3 conditions are met + * a) TX Budget is still available + * b) work_done = true when XSK TX desc peek is empty (no more + * pending XSK TX for transmission) + */ + return !!budget && work_done; +} + +static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) +{ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { + tc += 64; + + if (priv->plat->force_thresh_dma_mode) + stmmac_set_dma_operation_mode(priv, tc, tc, chan); + else + stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, + chan); + + priv->xstats.threshold = tc; + } +} + +/** + * stmmac_tx_clean - to manage the transmission completion + * @priv: driver private structure + * @budget: napi budget limiting this functions packet handling + * @queue: TX queue index + * Description: it reclaims the transmit resources after transmission completes. 
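stmmac_tx_clean() below, stmmac_xdp_xmit_zc() above and the EEE idle check all lean on the same ring bookkeeping: cur_tx marks the next slot to queue, dirty_tx the next slot to reclaim, and indices wrap with a power-of-two mask. A minimal model of that arithmetic; the availability formula follows the same idea as the driver's stmmac_tx_avail() and the wrap mirrors what STMMAC_GET_ENTRY() is used for:

#include <stdio.h>

struct tx_ring {
	unsigned int cur_tx;    /* next descriptor to queue */
	unsigned int dirty_tx;  /* next descriptor to reclaim */
	unsigned int size;      /* power of two */
};

/* Advance-and-mask wrap of a ring index. */
static unsigned int ring_next(unsigned int idx, unsigned int size)
{
	return (idx + 1) & (size - 1);
}

/* Free slots between cur and dirty, keeping one slot as a guard. */
static unsigned int ring_avail(const struct tx_ring *r)
{
	if (r->dirty_tx > r->cur_tx)
		return r->dirty_tx - r->cur_tx - 1;
	return r->size - 1 - r->cur_tx + r->dirty_tx;
}

/* The EEE path only enters LPI when every TX queue satisfies this. */
static int ring_idle(const struct tx_ring *r)
{
	return r->dirty_tx == r->cur_tx;
}

int main(void)
{
	struct tx_ring r = { .cur_tx = 5, .dirty_tx = 2, .size = 512 };

	printf("avail=%u idle=%d\n", ring_avail(&r), ring_idle(&r));
	r.cur_tx = ring_next(r.cur_tx, r.size);
	printf("after queueing one more: avail=%u\n", ring_avail(&r));
	return 0;
}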
+ */ +static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned int entry, xmits = 0, count = 0; + + if (!get_ecdev(priv)) + __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); + + priv->xstats.tx_clean++; + + tx_q->xsk_frames_done = 0; + + entry = tx_q->dirty_tx; + + /* Try to clean all TX complete frame in 1 shot */ + while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { + struct xdp_frame *xdpf; + struct sk_buff *skb; + struct dma_desc *p; + int status; + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { + xdpf = tx_q->xdpf[entry]; + skb = NULL; + } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + xdpf = NULL; + skb = tx_q->tx_skbuff[entry]; + } else { + xdpf = NULL; + skb = NULL; + } + + if (priv->extend_desc) + p = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &tx_q->dma_entx[entry].basic; + else + p = tx_q->dma_tx + entry; + + status = stmmac_tx_status(priv, &priv->dev->stats, + &priv->xstats, p, priv->ioaddr); + /* Check if the descriptor is owned by the DMA */ + if (unlikely(status & tx_dma_own)) + break; + + count++; + + /* Make sure descriptor fields are read after reading + * the own bit. + */ + dma_rmb(); + + /* Just consider the last segment and ...*/ + if (likely(!(status & tx_not_ls))) { + /* ... verify the status error condition */ + if (unlikely(status & tx_err)) { + priv->dev->stats.tx_errors++; + if (unlikely(status & tx_err_bump_tc)) + stmmac_bump_dma_threshold(priv, queue); + } else { + priv->dev->stats.tx_packets++; + priv->xstats.tx_pkt_n++; + priv->xstats.txq_stats[queue].tx_pkt_n++; + } + if (skb) + stmmac_get_tx_hwtstamp(priv, p, skb); + } + + if (likely(tx_q->tx_skbuff_dma[entry].buf && + tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { + if (tx_q->tx_skbuff_dma[entry].map_as_page) + dma_unmap_page(priv->device, + tx_q->tx_skbuff_dma[entry].buf, + tx_q->tx_skbuff_dma[entry].len, + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + tx_q->tx_skbuff_dma[entry].buf, + tx_q->tx_skbuff_dma[entry].len, + DMA_TO_DEVICE); + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->tx_skbuff_dma[entry].len = 0; + tx_q->tx_skbuff_dma[entry].map_as_page = false; + } + + stmmac_clean_desc3(priv, tx_q, p); + + tx_q->tx_skbuff_dma[entry].last_segment = false; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { + xdp_return_frame(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + if (likely(skb)) { + pkts_compl++; + bytes_compl += skb->len; + if (!get_ecdev(priv)) { + dev_consume_skb_any(skb); + } + tx_q->tx_skbuff[entry] = NULL; + } + } + + stmmac_release_tx_desc(priv, p, priv->mode); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + } + tx_q->dirty_tx = entry; + + if (!get_ecdev(priv)) { + netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), + pkts_compl, bytes_compl); + } + + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, + 
queue))) && + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { + + netif_dbg(priv, tx_done, priv->dev, + "%s: restart transmit\n", __func__); + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + if (tx_q->xsk_pool) { + bool work_done; + + if (tx_q->xsk_frames_done) + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + + if (xsk_uses_need_wakeup(tx_q->xsk_pool)) + xsk_set_tx_need_wakeup(tx_q->xsk_pool); + + /* For XSK TX, we try to send as many as possible. + * If XSK work done (XSK TX desc empty and budget still + * available), return "budget - 1" to reenable TX IRQ. + * Else, return "budget" to make NAPI continue polling. + */ + work_done = stmmac_xdp_xmit_zc(priv, queue, + STMMAC_XSK_TX_BUDGET_MAX); + if (work_done) + xmits = budget - 1; + else + xmits = budget; + } + + if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && + priv->eee_sw_timer_en) { + if (stmmac_enable_eee_mode(priv)) + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); + } + + /* We still have pending packets, let's call for a new scheduling */ + if (!get_ecdev(priv) && tx_q->dirty_tx != tx_q->cur_tx) + stmmac_tx_timer_arm(priv, queue); + + if (!get_ecdev(priv)) + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); + + /* Combine decisions from TX clean and XSK TX */ + return max(count, xmits); +} + +/** + * stmmac_tx_err - to manage the tx error + * @priv: driver private structure + * @chan: channel index + * Description: it cleans the descriptors and restarts the transmission + * in case of transmission errors. + */ +static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); + + stmmac_stop_tx_dma(priv, chan); + dma_free_tx_skbufs(priv, &priv->dma_conf, chan); + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); + stmmac_reset_tx_queue(priv, chan); + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + stmmac_start_tx_dma(priv, chan); + + priv->dev->stats.tx_errors++; + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); +} + +/** + * stmmac_set_dma_operation_mode - Set DMA operation mode by channel + * @priv: driver private structure + * @txmode: TX operating mode + * @rxmode: RX operating mode + * @chan: channel index + * Description: it is used for configuring of the DMA operation mode in + * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward + * mode. 
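+ * The RX/TX FIFO sizes come from the platform data or, if zero, from the
+ * DMA capability register, and are divided by the number of active
+ * channels before being programmed for the given queue.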
+ */ +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, + u32 rxmode, u32 chan) +{ + u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + int rxfifosz = priv->plat->rx_fifo_size; + int txfifosz = priv->plat->tx_fifo_size; + + if (rxfifosz == 0) + rxfifosz = priv->dma_cap.rx_fifo_size; + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + /* Adjust for real per queue fifo size */ + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; + + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); +} + +static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) +{ + int ret; + + ret = stmmac_safety_feat_irq_status(priv, priv->dev, + priv->ioaddr, priv->dma_cap.asp, &priv->sstats); + if (ret && (ret != -EINVAL)) { + stmmac_global_err(priv); + return true; + } + + return false; +} + +static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) +{ + int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan, dir); + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + struct stmmac_channel *ch = &priv->channel[chan]; + struct napi_struct *rx_napi; + struct napi_struct *tx_napi; + unsigned long flags; + + rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; + tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; + + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { + if (napi_schedule_prep(rx_napi)) { + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + __napi_schedule(rx_napi); + } + } + + if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { + if (napi_schedule_prep(tx_napi)) { + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + __napi_schedule(tx_napi); + } + } + + return status; +} + +/** + * stmmac_dma_interrupt - DMA ISR + * @priv: driver private structure + * Description: this is the DMA ISR. It is called by the main ISR. + * It calls the dwmac dma routine and schedule poll method in case of some + * work can be done. + */ +static void stmmac_dma_interrupt(struct stmmac_priv *priv) +{ + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 rx_channel_count = priv->plat->rx_queues_to_use; + u32 channels_to_check = tx_channel_count > rx_channel_count ? + tx_channel_count : rx_channel_count; + u32 chan; + int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; + + /* Make sure we never check beyond our status buffer. 
*/ + if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) + channels_to_check = ARRAY_SIZE(status); + + for (chan = 0; chan < channels_to_check; chan++) + status[chan] = stmmac_napi_check(priv, chan, + DMA_DIR_RXTX); + + for (chan = 0; chan < tx_channel_count; chan++) { + if (unlikely(status[chan] & tx_hard_error_bump_tc)) { + /* Try to bump up the dma threshold on this failure */ + stmmac_bump_dma_threshold(priv, chan); + } else if (unlikely(status[chan] == tx_hard_error)) { + stmmac_tx_err(priv, chan); + } + } +} + +/** + * stmmac_mmc_setup: setup the Mac Management Counters (MMC) + * @priv: driver private structure + * Description: this masks the MMC irq, in fact, the counters are managed in SW. + */ +static void stmmac_mmc_setup(struct stmmac_priv *priv) +{ + unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | + MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; + + stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); + + if (priv->dma_cap.rmon) { + stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); + memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + } else + netdev_info(priv->dev, "No MAC Management Counters available\n"); +} + +/** + * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. + * @priv: driver private structure + * Description: + * new GMAC chip generations have a new register to indicate the + * presence of the optional feature/functions. + * This can be also used to override the value passed through the + * platform and necessary for old MAC10/100 and GMAC chips. + */ +static int stmmac_get_hw_features(struct stmmac_priv *priv) +{ + return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; +} + +/** + * stmmac_check_ether_addr - check if the MAC addr is valid + * @priv: driver private structure + * Description: + * it is to verify if the MAC address is valid, in case of failures it + * generates a random MAC address + */ +static void stmmac_check_ether_addr(struct stmmac_priv *priv) +{ + u8 addr[ETH_ALEN]; + + if (!is_valid_ether_addr(priv->dev->dev_addr)) { + stmmac_get_umac_addr(priv, priv->hw, addr, 0); + if (is_valid_ether_addr(addr)) + eth_hw_addr_set(priv->dev, addr); + else + eth_hw_addr_random(priv->dev); + dev_info(priv->device, "device MAC address %pM\n", + priv->dev->dev_addr); + } +} + +/** + * stmmac_init_dma_engine - DMA init. + * @priv: driver private structure + * Description: + * It inits the DMA invoking the specific MAC/GMAC callback. + * Some DMA parameters can be passed from the platform; + * in case of these are not passed a default is kept for the MAC or GMAC. 
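+ * The sequence is: software reset, common DMA and optional AXI setup,
+ * then per-channel CSR, RX and TX configuration including the tail
+ * pointers.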
+ */ +static int stmmac_init_dma_engine(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + u32 chan = 0; + int atds = 0; + int ret = 0; + + if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { + dev_err(priv->device, "Invalid DMA configuration\n"); + return -EINVAL; + } + + if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) + atds = 1; + + ret = stmmac_reset(priv, priv->ioaddr); + if (ret) { + dev_err(priv->device, "Failed to reset the dma\n"); + return ret; + } + + /* DMA Configuration */ + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + + if (priv->plat->axi) + stmmac_axi(priv, priv->ioaddr, priv->plat->axi); + + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) { + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } + + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_channels_count; chan++) { + rx_q = &priv->dma_conf.rx_queue[chan]; + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + } + + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_channels_count; chan++) { + tx_q = &priv->dma_conf.tx_queue[chan]; + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); + } + + return ret; +} + +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + u32 tx_coal_timer = priv->tx_coal_timer[queue]; + + if (!tx_coal_timer) + return; + + if (get_ecdev(priv)) + return; + + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(tx_coal_timer), + HRTIMER_MODE_REL); +} + +/** + * stmmac_tx_timer - mitigation sw timer for tx. + * @t: data pointer + * Description: + * This is the timer handler to directly invoke the stmmac_tx_clean. + */ +static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) +{ + struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); + struct stmmac_priv *priv = tx_q->priv_data; + struct stmmac_channel *ch; + struct napi_struct *napi; + + ch = &priv->channel[tx_q->queue_index]; + napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; + + if (likely(napi_schedule_prep(napi))) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + __napi_schedule(napi); + } + + return HRTIMER_NORESTART; +} + +/** + * stmmac_init_coalesce - init mitigation options. + * @priv: driver private structure + * Description: + * This inits the coalesce parameters: i.e. timer rate, + * timer handler and default threshold used for enabling the + * interrupt on completion bit. 
+ */ +static void stmmac_init_coalesce(struct stmmac_priv *priv) +{ + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 rx_channel_count = priv->plat->rx_queues_to_use; + u32 chan; + + for (chan = 0; chan < tx_channel_count; chan++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + + priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; + priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; + + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx_q->txtimer.function = stmmac_tx_timer; + } + + for (chan = 0; chan < rx_channel_count; chan++) + priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; +} + +static void stmmac_set_rings_length(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan; + + /* set TX ring length */ + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_set_tx_ring_len(priv, priv->ioaddr, + (priv->dma_conf.dma_tx_size - 1), chan); + + /* set RX ring length */ + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_set_rx_ring_len(priv, priv->ioaddr, + (priv->dma_conf.dma_rx_size - 1), chan); +} + +/** + * stmmac_set_tx_queue_weight - Set TX queue weight + * @priv: driver private structure + * Description: It is used for setting TX queues weight + */ +static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 weight; + u32 queue; + + for (queue = 0; queue < tx_queues_count; queue++) { + weight = priv->plat->tx_queues_cfg[queue].weight; + stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); + } +} + +/** + * stmmac_configure_cbs - Configure CBS in TX queue + * @priv: driver private structure + * Description: It is used for configuring CBS in AVB TX queues + */ +static void stmmac_configure_cbs(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 mode_to_use; + u32 queue; + + /* queue 0 is reserved for legacy traffic */ + for (queue = 1; queue < tx_queues_count; queue++) { + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB) + continue; + + stmmac_config_cbs(priv, priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + } +} + +/** + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel + * @priv: driver private structure + * Description: It is used for mapping RX queues to RX dma channels + */ +static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u32 chan; + + for (queue = 0; queue < rx_queues_count; queue++) { + chan = priv->plat->rx_queues_cfg[queue].chan; + stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); + } +} + +/** + * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority + * @priv: driver private structure + * Description: It is used for configuring the RX Queue Priority + */ +static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u32 prio; + + for (queue = 0; queue < rx_queues_count; queue++) { + if (!priv->plat->rx_queues_cfg[queue].use_prio) + continue; + + prio = priv->plat->rx_queues_cfg[queue].prio; + stmmac_rx_queue_prio(priv, priv->hw, prio, queue); + } +} + +/** + * stmmac_mac_config_tx_queues_prio - Configure TX Queue 
priority + * @priv: driver private structure + * Description: It is used for configuring the TX Queue Priority + */ +static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue; + u32 prio; + + for (queue = 0; queue < tx_queues_count; queue++) { + if (!priv->plat->tx_queues_cfg[queue].use_prio) + continue; + + prio = priv->plat->tx_queues_cfg[queue].prio; + stmmac_tx_queue_prio(priv, priv->hw, prio, queue); + } +} + +/** + * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing + * @priv: driver private structure + * Description: It is used for configuring the RX queue routing + */ +static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u8 packet; + + for (queue = 0; queue < rx_queues_count; queue++) { + /* no specific packet type routing specified for the queue */ + if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) + continue; + + packet = priv->plat->rx_queues_cfg[queue].pkt_route; + stmmac_rx_queue_routing(priv, priv->hw, packet, queue); + } +} + +static void stmmac_mac_config_rss(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.rssen || !priv->plat->rss_en) { + priv->rss.enable = false; + return; + } + + if (priv->dev->features & NETIF_F_RXHASH) + priv->rss.enable = true; + else + priv->rss.enable = false; + + stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); +} + +/** + * stmmac_mtl_configuration - Configure MTL + * @priv: driver private structure + * Description: It is used for configurring MTL + */ +static void stmmac_mtl_configuration(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 tx_queues_count = priv->plat->tx_queues_to_use; + + if (tx_queues_count > 1) + stmmac_set_tx_queue_weight(priv); + + /* Configure MTL RX algorithms */ + if (rx_queues_count > 1) + stmmac_prog_mtl_rx_algorithms(priv, priv->hw, + priv->plat->rx_sched_algorithm); + + /* Configure MTL TX algorithms */ + if (tx_queues_count > 1) + stmmac_prog_mtl_tx_algorithms(priv, priv->hw, + priv->plat->tx_sched_algorithm); + + /* Configure CBS in AVB TX queues */ + if (tx_queues_count > 1) + stmmac_configure_cbs(priv); + + /* Map RX MTL to DMA channels */ + stmmac_rx_queue_dma_chan_map(priv); + + /* Enable MAC RX Queues */ + stmmac_mac_enable_rx_queues(priv); + + /* Set RX priorities */ + if (rx_queues_count > 1) + stmmac_mac_config_rx_queues_prio(priv); + + /* Set TX priorities */ + if (tx_queues_count > 1) + stmmac_mac_config_tx_queues_prio(priv); + + /* Set RX routing */ + if (rx_queues_count > 1) + stmmac_mac_config_rx_queues_routing(priv); + + /* Receive Side Scaling */ + if (rx_queues_count > 1) + stmmac_mac_config_rss(priv); +} + +static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) +{ + if (priv->dma_cap.asp) { + netdev_info(priv->dev, "Enabling Safety Features\n"); + stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, + priv->plat->safety_feat_cfg); + } else { + netdev_info(priv->dev, "No Safety Features support found\n"); + } +} + +static int stmmac_fpe_start_wq(struct stmmac_priv *priv) +{ + char *name; + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); + clear_bit(__FPE_REMOVING, &priv->fpe_task_state); + + name = priv->wq_name; + sprintf(name, "%s-fpe", priv->dev->name); + + priv->fpe_wq = create_singlethread_workqueue(name); + if (!priv->fpe_wq) { + netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); + + 
return -ENOMEM; + } + netdev_info(priv->dev, "FPE workqueue start"); + + return 0; +} + +/** + * stmmac_hw_setup - setup mac in a usable state. + * @dev : pointer to the device structure. + * @ptp_register: register PTP if set + * Description: + * this is the main function to setup the HW in a usable state because the + * dma engine is reset, the core registers are configured (e.g. AXI, + * Checksum features, timers). The DMA is ready to start receiving and + * transmitting. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + bool sph_en; + u32 chan; + int ret; + + /* DMA initialization and SW reset */ + ret = stmmac_init_dma_engine(priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA engine initialization failed\n", + __func__); + return ret; + } + + /* Copy the MAC addr into the HW */ + stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); + + /* PS and related bits will be programmed according to the speed */ + if (priv->hw->pcs) { + int speed = priv->plat->mac_port_sel_speed; + + if ((speed == SPEED_10) || (speed == SPEED_100) || + (speed == SPEED_1000)) { + priv->hw->ps = speed; + } else { + dev_warn(priv->device, "invalid port speed\n"); + priv->hw->ps = 0; + } + } + + /* Initialize the MAC Core */ + stmmac_core_init(priv, priv->hw, dev); + + /* Initialize MTL*/ + stmmac_mtl_configuration(priv); + + /* Initialize Safety Features */ + stmmac_safety_feat_configuration(priv); + + ret = stmmac_rx_ipc(priv, priv->hw); + if (!ret) { + netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); + priv->plat->rx_coe = STMMAC_RX_COE_NONE; + priv->hw->rx_csum = 0; + } + + /* Enable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, true); + + /* Set the HW DMA mode and the COE */ + stmmac_dma_operation_mode(priv); + + stmmac_mmc_setup(priv); + + if (ptp_register) { + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + } + + ret = stmmac_init_ptp(priv); + if (ret == -EOPNOTSUPP) + netdev_info(priv->dev, "PTP not supported by HW\n"); + else if (ret) + netdev_warn(priv->dev, "PTP init failed\n"); + else if (ptp_register) + stmmac_ptp_register(priv); + + priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; + + /* Convert the timer from msec to usec */ + if (!priv->tx_lpi_timer) + priv->tx_lpi_timer = eee_timer * 1000; + + if (priv->use_riwt) { + u32 queue; + + for (queue = 0; queue < rx_cnt; queue++) { + if (!priv->rx_riwt[queue]) + priv->rx_riwt[queue] = DEF_DMA_RIWT; + + stmmac_rx_watchdog(priv, priv->ioaddr, + priv->rx_riwt[queue], queue); + } + } + + if (priv->hw->pcs) + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); + + /* set TX and RX rings length */ + stmmac_set_rings_length(priv); + + /* Enable TSO */ + if (priv->tso) { + for (chan = 0; chan < tx_cnt; chan++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + + /* TSO and TBS cannot co-exist */ + if (tx_q->tbs & STMMAC_TBS_AVAIL) + continue; + + stmmac_enable_tso(priv, priv->ioaddr, 1, chan); + } + } + + /* Enable Split Header */ + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + for (chan = 0; chan < rx_cnt; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + + + /* VLAN Tag Insertion */ + if (priv->dma_cap.vlins) 
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); + + /* TBS */ + for (chan = 0; chan < tx_cnt; chan++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + int enable = tx_q->tbs & STMMAC_TBS_AVAIL; + + stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); + } + + /* Configure real RX and TX queues */ + netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); + netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); + + /* Start the ball rolling... */ + stmmac_start_all_dma(priv); + + if (priv->dma_cap.fpesel) { + stmmac_fpe_start_wq(priv); + + if (priv->plat->fpe_cfg->enable) + stmmac_fpe_handshake(priv, true); + } + + return 0; +} + +static void stmmac_hw_teardown(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + clk_disable_unprepare(priv->plat->clk_ptp_ref); +} + +static void stmmac_free_irq(struct net_device *dev, + enum request_irq_err irq_err, int irq_idx) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int j; + + switch (irq_err) { + case REQ_IRQ_ERR_ALL: + irq_idx = priv->plat->tx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_TX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->tx_irq[j] > 0) { + irq_set_affinity_hint(priv->tx_irq[j], NULL); + free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); + } + } + irq_idx = priv->plat->rx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_RX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->rx_irq[j] > 0) { + irq_set_affinity_hint(priv->rx_irq[j], NULL); + free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); + } + } + + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) + free_irq(priv->sfty_ue_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_UE: + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) + free_irq(priv->sfty_ce_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_CE: + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) + free_irq(priv->lpi_irq, dev); + fallthrough; + case REQ_IRQ_ERR_LPI: + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) + free_irq(priv->wol_irq, dev); + fallthrough; + case REQ_IRQ_ERR_WOL: + free_irq(dev->irq, dev); + fallthrough; + case REQ_IRQ_ERR_MAC: + case REQ_IRQ_ERR_NO: + /* If MAC IRQ request error, no more IRQ to free */ + break; + } +} + +static int stmmac_request_irq_multi_msi(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + enum request_irq_err irq_err; + cpumask_t cpu_mask; + int irq_idx = 0; + char *int_name; + int ret; + int i; + + /* For common interrupt */ + int_name = priv->int_name_mac; + sprintf(int_name, "%s:%s", dev->name, "mac"); + ret = request_irq(dev->irq, stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc mac MSI %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + goto irq_error; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + priv->wol_irq_disabled = true; + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + int_name = priv->int_name_wol; + sprintf(int_name, "%s:%s", dev->name, "wol"); + ret = request_irq(priv->wol_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc wol MSI %d (error: %d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + goto irq_error; + } + } + + /* Request the LPI IRQ in case of another line + * is used for LPI + */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + int_name = priv->int_name_lpi; + sprintf(int_name, "%s:%s", 
dev->name, "lpi"); + ret = request_irq(priv->lpi_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc lpi MSI %d (error: %d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + /* Request the Safety Feature Correctible Error line in + * case of another line is used + */ + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { + int_name = priv->int_name_sfty_ce; + sprintf(int_name, "%s:%s", dev->name, "safety-ce"); + ret = request_irq(priv->sfty_ce_irq, + stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ce MSI %d (error: %d)\n", + __func__, priv->sfty_ce_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_CE; + goto irq_error; + } + } + + /* Request the Safety Feature Uncorrectible Error line in + * case of another line is used + */ + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { + int_name = priv->int_name_sfty_ue; + sprintf(int_name, "%s:%s", dev->name, "safety-ue"); + ret = request_irq(priv->sfty_ue_irq, + stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ue MSI %d (error: %d)\n", + __func__, priv->sfty_ue_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_UE; + goto irq_error; + } + } + + /* Request Rx MSI irq */ + for (i = 0; i < priv->plat->rx_queues_to_use; i++) { + if (i >= MTL_MAX_RX_QUEUES) + break; + if (priv->rx_irq[i] == 0) + continue; + + int_name = priv->int_name_rx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); + ret = request_irq(priv->rx_irq[i], + stmmac_msi_intr_rx, + 0, int_name, &priv->dma_conf.rx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc rx-%d MSI %d (error: %d)\n", + __func__, i, priv->rx_irq[i], ret); + irq_err = REQ_IRQ_ERR_RX; + irq_idx = i; + goto irq_error; + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); + } + + /* Request Tx MSI irq */ + for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + if (i >= MTL_MAX_TX_QUEUES) + break; + if (priv->tx_irq[i] == 0) + continue; + + int_name = priv->int_name_tx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); + ret = request_irq(priv->tx_irq[i], + stmmac_msi_intr_tx, + 0, int_name, &priv->dma_conf.tx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc tx-%d MSI %d (error: %d)\n", + __func__, i, priv->tx_irq[i], ret); + irq_err = REQ_IRQ_ERR_TX; + irq_idx = i; + goto irq_error; + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, irq_idx); + return ret; +} + +static int stmmac_request_irq_single(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + enum request_irq_err irq_err; + int ret; + + ret = request_irq(dev->irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + goto irq_error; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + ret = request_irq(priv->wol_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the WoL IRQ %d 
(%d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + goto irq_error; + } + } + + /* Request the IRQ lines */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + ret = request_irq(priv->lpi_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, 0); + return ret; +} + +static int stmmac_request_irq(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + /* Request the IRQ lines */ + if (priv->plat->multi_msi_en) + ret = stmmac_request_irq_multi_msi(dev); + else + ret = stmmac_request_irq_single(dev); + + return ret; +} + +/** + * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue + * @priv: driver private structure + * @mtu: MTU to setup the dma queue and buf with + * Description: Allocate and generate a dma_conf based on the provided MTU. + * Allocate the Tx/Rx DMA queue and init them. + * Return value: + * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. + */ +static struct stmmac_dma_conf * +stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) +{ + struct stmmac_dma_conf *dma_conf; + int chan, bfsize, ret; + + dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); + if (!dma_conf) { + netdev_err(priv->dev, "%s: DMA conf allocation failed\n", + __func__); + return ERR_PTR(-ENOMEM); + } + + bfsize = stmmac_set_16kib_bfsize(priv, mtu); + if (bfsize < 0) + bfsize = 0; + + if (bfsize < BUF_SIZE_16KiB) + bfsize = stmmac_set_bfsize(mtu, 0); + + dma_conf->dma_buf_sz = bfsize; + /* Chose the tx/rx size from the already defined one in the + * priv struct. (if defined) + */ + dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; + dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; + + if (!dma_conf->dma_tx_size) + dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; + if (!dma_conf->dma_rx_size) + dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; + + /* Earlier check for TBS */ + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; + + /* Setup per-TXQ tbs flag before TX descriptor alloc */ + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; + } + + ret = alloc_dma_desc_resources(priv, dma_conf); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto alloc_error; + } + + ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } + + return dma_conf; + +init_error: + free_dma_desc_resources(priv, dma_conf); +alloc_error: + kfree(dma_conf); + return ERR_PTR(ret); +} + +/** + * __stmmac_open - open entry point of the driver + * @dev : pointer to the device structure. + * @dma_conf : structure to take the dma data + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. 
+ */ +static int __stmmac_open(struct net_device *dev, + struct stmmac_dma_conf *dma_conf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int mode = priv->plat->phy_interface; + u32 chan; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + if (priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI && + (!priv->hw->xpcs || + xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { + ret = stmmac_init_phy(dev); + if (ret) { + netdev_err(priv->dev, + "%s: Cannot attach to PHY (error: %d)\n", + __func__, ret); + goto init_phy_error; + } + } + + /* Extra statistics */ + memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); + priv->xstats.threshold = tc; + + priv->rx_copybreak = STMMAC_RX_COPYBREAK; + + buf_sz = dma_conf->dma_buf_sz; + for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) + if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) + dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; + memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); + + stmmac_reset_queues_param(priv); + + if (priv->plat->serdes_powerup) { + ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: Serdes powerup failed\n", + __func__); + goto init_error; + } + } + + ret = stmmac_hw_setup(dev, true); + if (ret < 0) { + netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); + goto init_error; + } + + if (!get_ecdev(priv)) { + stmmac_init_coalesce(priv); + } + + if (get_ecdev(priv)) { + rtnl_lock(); + } + + phylink_start(priv->phylink); + /* We may have called phylink_speed_down before */ + phylink_speed_up(priv->phylink); + if (get_ecdev(priv)) { + rtnl_unlock(); + } + + ret = stmmac_request_irq(dev); + if (ret) + goto irq_error; + + if (!get_ecdev(priv)) { + stmmac_enable_all_queues(priv); + netif_tx_start_all_queues(priv->dev); + stmmac_enable_all_dma_irq(priv); + } + + return 0; + +irq_error: + phylink_stop(priv->phylink); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + stmmac_hw_teardown(dev); +init_error: + phylink_disconnect_phy(priv->phylink); +init_phy_error: + pm_runtime_put(priv->device); + return ret; +} + +static int stmmac_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_dma_conf *dma_conf; + int ret; + + dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); + if (IS_ERR(dma_conf)) + return PTR_ERR(dma_conf); + + ret = __stmmac_open(dev, dma_conf); + if (ret) + free_dma_desc_resources(priv, dma_conf); + + kfree(dma_conf); + return ret; +} + +static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) +{ + set_bit(__FPE_REMOVING, &priv->fpe_task_state); + + if (priv->fpe_wq) { + destroy_workqueue(priv->fpe_wq); + priv->fpe_wq = NULL; + } + + netdev_info(priv->dev, "FPE workqueue stop"); +} + +/** + * stmmac_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. 
+ */ +static int stmmac_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; + + if (get_ecdev(priv)) { + rtnl_lock(); + } + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); + /* Stop and disconnect the PHY */ + phylink_stop(priv->phylink); + phylink_disconnect_phy(priv->phylink); + if (get_ecdev(priv)) { + rtnl_unlock(); + } + + stmmac_disable_all_queues(priv); + + if (!get_ecdev(priv)) { + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + } + + netif_tx_disable(dev); + + /* Free the IRQ lines */ + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); + + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + + /* Stop TX/RX DMA and clear the descriptors */ + stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv, &priv->dma_conf); + + /* Disable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, false); + + /* Powerdown Serdes if there is */ + if (priv->plat->serdes_powerdown) + priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); + + if (!get_ecdev(priv)) { + netif_carrier_off(dev); + } + + stmmac_release_ptp(priv); + pm_runtime_put(priv->device); + + if (priv->dma_cap.fpesel) + stmmac_fpe_stop_wq(priv); + + return 0; +} + +static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, + struct stmmac_tx_queue *tx_q) +{ + u16 tag = 0x0, inner_tag = 0x0; + u32 inner_type = 0x0; + struct dma_desc *p; + + if (!priv->dma_cap.vlins) + return false; + if (!skb_vlan_tag_present(skb)) + return false; + if (skb->vlan_proto == htons(ETH_P_8021AD)) { + inner_tag = skb_vlan_tag_get(skb); + inner_type = STMMAC_VLAN_INSERT; + } + + tag = skb_vlan_tag_get(skb); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + p = &tx_q->dma_tx[tx_q->cur_tx]; + + if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) + return false; + + stmmac_set_tx_owner(priv, p); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + return true; +} + +/** + * stmmac_tso_allocator - close entry point of the driver + * @priv: driver private structure + * @des: buffer start address + * @total_len: total length to fill in descriptors + * @last_segment: condition for the last descriptor + * @queue: TX queue index + * Description: + * This function fills descriptor and request new descriptors according to + * buffer length to fill + */ +static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, + int total_len, bool last_segment, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct dma_desc *desc; + u32 buff_size; + int tmp_len; + + tmp_len = total_len; + + while (tmp_len > 0) { + dma_addr_t curr_addr; + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, + priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + desc = &tx_q->dma_tx[tx_q->cur_tx]; + + curr_addr = des + (total_len - tmp_len); + if (priv->dma_cap.addr64 <= 32) + desc->des0 = cpu_to_le32(curr_addr); + else + stmmac_set_desc_addr(priv, desc, curr_addr); + + buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 
+ TSO_MAX_BUFF_SIZE : tmp_len; + + stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, + 0, 1, + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), + 0, 0); + + tmp_len -= TSO_MAX_BUFF_SIZE; + } +} + +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + int desc_size; + + if (likely(priv->extend_desc)) + desc_size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc_size = sizeof(struct dma_edesc); + else + desc_size = sizeof(struct dma_desc); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + wmb(); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); +} + +/** + * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) + * @skb : the socket buffer + * @dev : device pointer + * Description: this is the transmit function that is called on TSO frames + * (support available on GMAC4 and newer chips). + * Diagram below show the ring programming in case of TSO frames: + * + * First Descriptor + * -------- + * | DES0 |---> buffer1 = L2/L3/L4 header + * | DES1 |---> TCP Payload (can continue on next descr...) + * | DES2 |---> buffer 1 and 2 len + * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] + * -------- + * | + * ... + * | + * -------- + * | DES0 | --| Split TCP Payload on Buffers 1 and 2 + * | DES1 | --| + * | DES2 | --> buffer 1 and 2 len + * | DES3 | + * -------- + * + * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. + */ +static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dma_desc *desc, *first, *mss_desc = NULL; + struct stmmac_priv *priv = netdev_priv(dev); + int nfrags = skb_shinfo(skb)->nr_frags; + u32 queue = skb_get_queue_mapping(skb); + unsigned int first_entry, tx_packets; + int tmp_pay_len = 0, first_tx; + struct stmmac_tx_queue *tx_q; + bool has_vlan, set_ic; + u8 proto_hdr_len, hdr; + u32 pay_len, mss; + dma_addr_t des; + int i; + + tx_q = &priv->dma_conf.tx_queue[queue]; + first_tx = tx_q->cur_tx; + + /* Compute header lengths */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); + hdr = sizeof(struct udphdr); + } else { + proto_hdr_len = skb_tcp_all_headers(skb); + hdr = tcp_hdrlen(skb); + } + + /* Desc availability based on threshold should be enough safe */ + if (unlikely(stmmac_tx_avail(priv, queue) < + (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, + queue)); + /* This is a hard error, log it. 
*/ + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", + __func__); + } + return NETDEV_TX_BUSY; + } + + pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ + + mss = skb_shinfo(skb)->gso_size; + + /* set new MSS value if needed */ + if (mss != tx_q->mss) { + if (tx_q->tbs & STMMAC_TBS_AVAIL) + mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; + + stmmac_set_mss(priv, mss_desc, mss); + tx_q->mss = mss; + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, + priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + } + + if (netif_msg_tx_queued(priv)) { + pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", + __func__, hdr, proto_hdr_len, pay_len, mss); + pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, + skb->data_len); + } + + /* Check if VLAN can be inserted by HW */ + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); + + first_entry = tx_q->cur_tx; + WARN_ON(tx_q->tx_skbuff[first_entry]); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[first_entry].basic; + else + desc = &tx_q->dma_tx[first_entry]; + first = desc; + + if (has_vlan) + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); + + /* first descriptor: fill Headers on Buf1 */ + des = dma_map_single(priv->device, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + + tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; + + if (priv->dma_cap.addr64 <= 32) { + first->des0 = cpu_to_le32(des); + + /* Fill start of payload in buff2 of first descriptor */ + if (pay_len) + first->des1 = cpu_to_le32(des + proto_hdr_len); + + /* If needed take extra descriptors to fill the remaining payload */ + tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; + } else { + stmmac_set_desc_addr(priv, first, des); + tmp_pay_len = pay_len; + des += proto_hdr_len; + pay_len = 0; + } + + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); + + /* Prepare fragments */ + for (i = 0; i < nfrags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + des = skb_frag_dma_map(priv->device, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + + stmmac_tso_allocator(priv, des, skb_frag_size(frag), + (i == nfrags - 1), queue); + + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; + } + + tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; + + /* Only the last descriptor gets to point to the skb. 
*/ + tx_q->tx_skbuff[tx_q->cur_tx] = skb; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; + + /* Manage tx mitigation */ + tx_packets = (tx_q->cur_tx + 1) - first_tx; + tx_q->tx_count_frames += tx_packets; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) + set_ic = true; + else if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_packets > priv->tx_coal_frames[queue]) + set_ic = true; + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + desc = &tx_q->dma_tx[tx_q->cur_tx]; + + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } + + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + dev->stats.tx_bytes += skb->len; + priv->xstats.tx_tso_frames++; + priv->xstats.tx_tso_nfrags += nfrags; + + if (priv->sarc_type) + stmmac_set_desc_sarc(priv, first, priv->sarc_type); + + skb_tx_timestamp(skb); + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + stmmac_enable_tx_timestamp(priv, first); + } + + /* Complete the first descriptor before granting the DMA */ + stmmac_prepare_tso_tx_desc(priv, first, 1, + proto_hdr_len, + pay_len, + 1, tx_q->tx_skbuff_dma[first_entry].last_segment, + hdr / 4, (skb->len - proto_hdr_len)); + + /* If context desc is used to change MSS */ + if (mss_desc) { + /* Make sure that first descriptor has been completely + * written, including its own bit. This is because MSS is + * actually before first descriptor, so we need to make + * sure that MSS's own bit is the last thing written. + */ + dma_wmb(); + stmmac_set_tx_owner(priv, mss_desc); + } + + if (netif_msg_pktdata(priv)) { + pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, + tx_q->cur_tx, first, nfrags); + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb_headlen(skb)); + } + + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); + + return NETDEV_TX_OK; + +dma_map_err: + dev_err(priv->device, "Tx dma map failed\n"); + if (!get_ecdev(priv)) + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +/** + * stmmac_xmit - Tx entry point of the driver + * @skb : the socket buffer + * @dev : device pointer + * Description : this is the tx entry point of the driver. + * It programs the chain or the ring and supports oversized frames + * and SG feature. 
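+ * When the device is claimed as an EtherCAT device, the queue is never
+ * stopped on a full ring, BQL accounting via netdev_tx_sent_queue() is
+ * skipped and a failed DMA mapping does not free the skb.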
+ */ +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned int first_entry, tx_packets, enh_desc; + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int nopaged_len = skb_headlen(skb); + int i, csum_insertion = 0, is_jumbo = 0; + u32 queue = skb_get_queue_mapping(skb); + int nfrags = skb_shinfo(skb)->nr_frags; + int gso = skb_shinfo(skb)->gso_type; + struct dma_edesc *tbs_desc = NULL; + struct dma_desc *desc, *first; + struct stmmac_tx_queue *tx_q; + bool has_vlan, set_ic; + int entry, first_tx; + dma_addr_t des; + + tx_q = &priv->dma_conf.tx_queue[queue]; + first_tx = tx_q->cur_tx; + + if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) + stmmac_disable_eee_mode(priv); + + /* Manage oversized TCP frames for GMAC4 device */ + if (skb_is_gso(skb) && priv->tso) { + if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + return stmmac_tso_xmit(skb, dev); + if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) + return stmmac_tso_xmit(skb, dev); + } + + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, + queue)); + /* This is a hard error, log it. */ + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", + __func__); + } + return NETDEV_TX_BUSY; + } + + /* Check if VLAN can be inserted by HW */ + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); + + entry = tx_q->cur_tx; + first_entry = entry; + WARN_ON(tx_q->tx_skbuff[first_entry]); + + csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); + + if (likely(priv->extend_desc)) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[entry].basic; + else + desc = tx_q->dma_tx + entry; + + first = desc; + + if (has_vlan) + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); + + enh_desc = priv->plat->enh_desc; + /* To program the descriptors according to the size of the frame */ + if (enh_desc) + is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); + + if (unlikely(is_jumbo)) { + entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); + if (unlikely(entry < 0) && (entry != -EINVAL)) + goto dma_map_err; + } + + for (i = 0; i < nfrags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int len = skb_frag_size(frag); + bool last_segment = (i == (nfrags - 1)); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[entry]); + + if (likely(priv->extend_desc)) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[entry].basic; + else + desc = tx_q->dma_tx + entry; + + des = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; /* should reuse desc w/o issues */ + + tx_q->tx_skbuff_dma[entry].buf = des; + + stmmac_set_desc_addr(priv, desc, des); + + tx_q->tx_skbuff_dma[entry].map_as_page = true; + tx_q->tx_skbuff_dma[entry].len = len; + tx_q->tx_skbuff_dma[entry].last_segment = last_segment; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; + + /* Prepare the descriptor and set the own bit too */ + stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, + priv->mode, 1, last_segment, skb->len); + } + + /* Only the last descriptor gets to point to the skb. 
*/ + tx_q->tx_skbuff[entry] = skb; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; + + /* According to the coalesce parameter the IC bit for the latest + * segment is reset and the timer re-started to clean the tx status. + * This approach takes care about the fragments: desc is the first + * element in case of no SG. + */ + tx_packets = (entry + 1) - first_tx; + tx_q->tx_count_frames += tx_packets; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) + set_ic = true; + else if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_packets > priv->tx_coal_frames[queue]) + set_ic = true; + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + if (likely(priv->extend_desc)) + desc = &tx_q->dma_etx[entry].basic; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[entry].basic; + else + desc = &tx_q->dma_tx[entry]; + + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } + + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + tx_q->cur_tx = entry; + + if (netif_msg_pktdata(priv)) { + netdev_dbg(priv->dev, + "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, + entry, first, nfrags); + + netdev_dbg(priv->dev, ">>> frame to be transmitted: "); + print_pkt(skb->data, skb->len); + } + + if ((!get_ecdev(priv)) && (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1)))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + dev->stats.tx_bytes += skb->len; + + if (priv->sarc_type) + stmmac_set_desc_sarc(priv, first, priv->sarc_type); + + skb_tx_timestamp(skb); + + /* Ready to fill the first descriptor and set the OWN bit w/o any + * problems because all the descriptors are actually ready to be + * passed to the DMA engine. 
+ */ + if (likely(!is_jumbo)) { + bool last_segment = (nfrags == 0); + + des = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + + tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + + stmmac_set_desc_addr(priv, first, des); + + tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; + tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + stmmac_enable_tx_timestamp(priv, first); + } + + /* Prepare the first descriptor setting the OWN bit too */ + stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, + csum_insertion, priv->mode, 0, last_segment, + skb->len); + } + + if (tx_q->tbs & STMMAC_TBS_EN) { + struct timespec64 ts = ns_to_timespec64(skb->tstamp); + + tbs_desc = &tx_q->dma_entx[first_entry]; + stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); + } + + stmmac_set_tx_owner(priv, first); + + if (!get_ecdev(priv)) + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); + + return NETDEV_TX_OK; + +dma_map_err: + netdev_err(priv->dev, "Tx DMA map failed\n"); + if (!get_ecdev(priv)) + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ + struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); + __be16 vlan_proto = veth->h_vlan_proto; + u16 vlanid; + + if ((vlan_proto == htons(ETH_P_8021Q) && + dev->features & NETIF_F_HW_VLAN_CTAG_RX) || + (vlan_proto == htons(ETH_P_8021AD) && + dev->features & NETIF_F_HW_VLAN_STAG_RX)) { + /* pop the vlan tag */ + vlanid = ntohs(veth->h_vlan_TCI); + memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); + } +} + +/** + * stmmac_rx_refill - refill used skb preallocated buffers + * @priv: driver private structure + * @queue: RX queue index + * Description : this is to reallocate the skb for the reception process + * that is based on zero-copy. 
+ */ +static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int entry = rx_q->dirty_rx; + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + if (priv->dma_cap.host_dma_width <= 32) + gfp |= GFP_DMA32; + + while (dirty-- > 0) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; + struct dma_desc *p; + bool use_rx_wd; + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + if (!buf->page) { + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); + if (!buf->page) + break; + } + + if (priv->sph && !buf->sec_page) { + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); + if (!buf->sec_page) + break; + + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); + } + + buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; + + stmmac_set_desc_addr(priv, p, buf->addr); + if (priv->sph) + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); + else + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); + stmmac_refill_desc3(priv, rx_q, p); + + rx_q->rx_count_frames++; + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) + rx_q->rx_count_frames = 0; + + use_rx_wd = !priv->rx_coal_frames[queue]; + use_rx_wd |= rx_q->rx_count_frames > 0; + if (!priv->use_riwt) + use_rx_wd = false; + + dma_wmb(); + stmmac_set_rx_owner(priv, p, use_rx_wd); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); + } + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->dirty_rx * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); +} + +static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, + struct dma_desc *p, + int status, unsigned int len) +{ + unsigned int plen = 0, hlen = 0; + int coe = priv->hw->rx_csum; + + /* Not first descriptor, buffer is always zero */ + if (priv->sph && len) + return 0; + + /* First descriptor, get split header length */ + stmmac_get_rx_header_len(priv, p, &hlen); + if (priv->sph && hlen) { + priv->xstats.rx_split_hdr_pkt_n++; + return hlen; + } + + /* First descriptor, not last descriptor and not split header */ + if (status & rx_not_ls) + return priv->dma_conf.dma_buf_sz; + + plen = stmmac_get_rx_frame_len(priv, p, coe); + + /* First descriptor and last descriptor and not split header */ + return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); +} + +static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, + struct dma_desc *p, + int status, unsigned int len) +{ + int coe = priv->hw->rx_csum; + unsigned int plen = 0; + + /* Not split header, buffer is not available */ + if (!priv->sph) + return 0; + + /* Not last descriptor */ + if (status & rx_not_ls) + return priv->dma_conf.dma_buf_sz; + + plen = stmmac_get_rx_frame_len(priv, p, coe); + + /* Last descriptor */ + return plen - len; +} + +static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, + struct xdp_frame *xdpf, bool dma_map) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc; + dma_addr_t dma_addr; + bool set_ic; + + if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) + return STMMAC_XDP_CONSUMED; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = 
&tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + if (dma_map) { + dma_addr = dma_map_single(priv->device, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) + return STMMAC_XDP_CONSUMED; + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; + } else { + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + + xdpf->headroom; + dma_sync_single_for_device(priv->device, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; + } + + tx_q->tx_skbuff_dma[entry].buf = dma_addr; + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdpf->len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + tx_q->xdpf[entry] = xdpf; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, + true, priv->mode, true, true, + xdpf->len); + + tx_q->tx_count_frames++; + + if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + tx_q->cur_tx = entry; + + return STMMAC_XDP_TX; +} + +static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= priv->plat->tx_queues_to_use) + index -= priv->plat->tx_queues_to_use; + + return index; +} + +static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int queue; + int res; + + if (unlikely(!xdpf)) + return STMMAC_XDP_CONSUMED; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with slow path */ + ec_txq_trans_cond_update(nq); + + res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); + if (res == STMMAC_XDP_TX) + stmmac_flush_tx_descriptors(priv, queue); + + __netif_tx_unlock(nq); + + return res; +} + +static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act; + int res; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + res = STMMAC_XDP_PASS; + break; + case XDP_TX: + res = stmmac_xdp_xmit_back(priv, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(priv->dev, xdp, prog) < 0) + res = STMMAC_XDP_CONSUMED; + else + res = STMMAC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(priv->dev, prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->dev, prog, act); + fallthrough; + case XDP_DROP: + res = STMMAC_XDP_CONSUMED; + break; + } + + return res; +} + +static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(priv->xdp_prog); + if (!prog) { + res = STMMAC_XDP_PASS; + goto out; + } + + res = __stmmac_xdp_run_prog(priv, prog, xdp); +out: + return ERR_PTR(-res); +} + +static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, + int xdp_status) +{ + int cpu; + int queue; + if(get_ecdev(priv)) { + return; + } + cpu = 
smp_processor_id(); + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + + if (xdp_status & STMMAC_XDP_TX) + stmmac_tx_timer_arm(priv, queue); + + if (xdp_status & STMMAC_XDP_REDIRECT) + xdp_do_flush(); +} + +static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + return skb; +} + +static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, + struct dma_desc *p, struct dma_desc *np, + struct xdp_buff *xdp) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int len = xdp->data_end - xdp->data; + enum pkt_hash_types hash_type; + int coe = priv->hw->rx_csum; + struct sk_buff *skb; + u32 hash; + + skb = stmmac_construct_skb_zc(ch, xdp); + if (!skb) { + priv->dev->stats.rx_dropped++; + return; + } + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rxtx_napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; +} + +static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + unsigned int entry = rx_q->dirty_rx; + struct dma_desc *rx_desc = NULL; + bool ret = true; + + budget = min(budget, stmmac_rx_dirty(priv, queue)); + + while (budget-- > 0 && entry != rx_q->cur_rx) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; + dma_addr_t dma_addr; + bool use_rx_wd; + + if (!buf->xdp) { + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) { + ret = false; + break; + } + } + + if (priv->extend_desc) + rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); + else + rx_desc = rx_q->dma_rx + entry; + + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, rx_desc, dma_addr); + stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); + stmmac_refill_desc3(priv, rx_q, rx_desc); + + rx_q->rx_count_frames++; + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) + rx_q->rx_count_frames = 0; + + use_rx_wd = !priv->rx_coal_frames[queue]; + use_rx_wd |= rx_q->rx_count_frames > 0; + if (!priv->use_riwt) + use_rx_wd = false; + + dma_wmb(); + stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); + } + + if (rx_desc) { + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->dirty_rx * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); + } + + return ret; +} + +static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + unsigned int count = 0, error = 0, len = 0; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int next_entry = rx_q->cur_rx; + unsigned int 
desc_size; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + int status = 0; + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) { + rx_head = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } + while (count < limit) { + struct stmmac_rx_buffer *buf; + unsigned int buf1_len = 0; + struct dma_desc *np, *p; + int entry; + int res; + + if (!count && rx_q->state_saved) { + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + error = 0; + len = 0; + } + + if (count >= limit) + break; + +read_again: + buf1_len = 0; + entry = next_entry; + buf = &rx_q->buf_pool[entry]; + + if (dirty >= STMMAC_RX_FILL_BATCH) { + failure = failure || + !stmmac_rx_refill_zc(priv, queue, dirty); + dirty = 0; + } + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) + break; + + /* Prefetch the next RX descriptor */ + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + next_entry = rx_q->cur_rx; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else + np = rx_q->dma_rx + next_entry; + + prefetch(np); + + /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, + rx_q->dma_erx + entry); + if (unlikely(status == discard_frame)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } + + if (unlikely(error && (status & rx_not_ls))) + goto read_again; + if (unlikely(error)) { + count++; + continue; + } + + /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ + if (likely(status & rx_not_ls)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + count++; + goto read_again; + } + + /* XDP ZC Frame only support primary buffers for now */ + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + + /* ACS is disabled; strip manually. 
*/ + if (likely(!(status & rx_not_ls))) { + buf1_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + + /* RX buffer is good and fit into a XSK pool buffer */ + buf->xdp->data_end = buf->xdp->data + buf1_len; + xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); + + prog = READ_ONCE(priv->xdp_prog); + res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); + + switch (res) { + case STMMAC_XDP_PASS: + stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); + xsk_buff_free(buf->xdp); + break; + case STMMAC_XDP_CONSUMED: + xsk_buff_free(buf->xdp); + priv->dev->stats.rx_dropped++; + break; + case STMMAC_XDP_TX: + case STMMAC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + buf->xdp = NULL; + dirty++; + count++; + } + + if (status & rx_not_ls) { + rx_q->state_saved = true; + rx_q->state.error = error; + rx_q->state.len = len; + } + + stmmac_finalize_xdp_rx(priv, xdp_status); + + priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; + + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { + if (failure || stmmac_rx_dirty(priv, queue) > 0) + xsk_set_rx_need_wakeup(rx_q->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_q->xsk_pool); + + return (int)count; + } + + return failure ? limit : (int)count; +} + +/** + * stmmac_rx - manage the receive process + * @priv: driver private structure + * @limit: napi bugget + * @queue: RX queue index. + * Description : this the function called by the napi poll method. + * It gets all the frames inside the ring. + */ +static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int count = 0, error = 0, len = 0; + int status = 0, coe = priv->hw->rx_csum; + unsigned int next_entry = rx_q->cur_rx; + enum dma_data_direction dma_dir; + unsigned int desc_size; + struct sk_buff *skb = NULL; + struct xdp_buff xdp; + int xdp_status = 0; + int buf_sz; + + dma_dir = page_pool_get_dma_dir(rx_q->page_pool); + buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) { + rx_head = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } + while (count < limit) { + unsigned int buf1_len = 0, buf2_len = 0; + enum pkt_hash_types hash_type; + struct stmmac_rx_buffer *buf; + struct dma_desc *np, *p; + int entry; + u32 hash; + + if (!count && rx_q->state_saved) { + skb = rx_q->state.skb; + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + skb = NULL; + error = 0; + len = 0; + } + +read_again: + if (count >= limit) + break; + + buf1_len = 0; + buf2_len = 0; + entry = next_entry; + buf = &rx_q->buf_pool[entry]; + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + netdev_dbg(priv->dev, "stmmac_rx: error: %d, status: %x\n", error, status); + if (unlikely(status & dma_own)) + { + break; + } + + rx_q->cur_rx = 
STMMAC_GET_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + next_entry = rx_q->cur_rx; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else + np = rx_q->dma_rx + next_entry; + + prefetch(np); + + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, rx_q->dma_erx + entry); + if (unlikely(status == discard_frame)) { + netdev_dbg(priv->dev, "discard frame"); + if (!get_ecdev(priv)) { + page_pool_recycle_direct(rx_q->page_pool, buf->page); + buf->page = NULL; + } + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } + + if (unlikely(error && (status & rx_not_ls))) + { + goto read_again; + } + if (unlikely(error)) { + dev_kfree_skb(skb); + skb = NULL; + count++; + continue; + } + + /* Buffer is good. Go on. */ + dma_rmb(); + netdev_dbg(priv->dev, "buffer good"); + + prefetch(page_address(buf->page) + buf->page_offset); + if (buf->sec_page) + prefetch(page_address(buf->sec_page)); + + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + buf2_len = stmmac_rx_buf2_len(priv, p, status, len); + len += buf2_len; + + /* ACS is disabled; strip manually. */ + if (likely(!(status & rx_not_ls))) { + if (buf2_len) { + buf2_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } else if (buf1_len) { + buf1_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + } + if (get_ecdev(priv)) { + unsigned char *va; + + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + va = page_address(buf->page) + buf->page_offset; + ecdev_receive(get_ecdev(priv), va, buf1_len); + netdev_dbg(priv->dev, "ecdev_receive: %u", buf1_len); + priv->ec_watchdog_jiffies = jiffies; + /* keep the page and pass it back to the device manually */ + dma_sync_single_for_device(priv->device, buf->addr, + buf1_len, dma_dir); + count++; + continue; + } + + if (!skb) { + unsigned int pre_len, sync_len; + + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + + xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq); + xdp_prepare_buff(&xdp, page_address(buf->page), + buf->page_offset, buf1_len, false); + + pre_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + skb = stmmac_xdp_run_prog(priv, &xdp); + /* Due xdp_adjust_tail: DMA sync for_device + * cover max len CPU touch + */ + sync_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + sync_len = max(sync_len, pre_len); + + /* For Not XDP_PASS verdict */ + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & STMMAC_XDP_CONSUMED) { + page_pool_put_page(rx_q->page_pool, + virt_to_head_page(xdp.data), + sync_len, true); + buf->page = NULL; + priv->dev->stats.rx_dropped++; + + /* Clear skb as it was set as + * status by XDP program. 
+ */ + skb = NULL; + + if (unlikely((status & rx_not_ls))) + goto read_again; + + count++; + continue; + } else if (xdp_res & (STMMAC_XDP_TX | + STMMAC_XDP_REDIRECT)) { + xdp_status |= xdp_res; + buf->page = NULL; + skb = NULL; + count++; + continue; + } + } + } + + if (!skb) { + /* XDP program may expand or reduce tail */ + buf1_len = xdp.data_end - xdp.data; + skb = napi_alloc_skb(&ch->rx_napi, buf1_len); + if (!skb) { + priv->dev->stats.rx_dropped++; + count++; + goto drain_data; + } + /* XDP program may adjust header */ + skb_copy_to_linear_data(skb, xdp.data, buf1_len); + skb_put(skb, buf1_len); + + /* Data payload copied into SKB, page ready for recycle */ + page_pool_recycle_direct(rx_q->page_pool, buf->page); + buf->page = NULL; + } else if (buf1_len) { + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf->page, buf->page_offset, buf1_len, + priv->dma_conf.dma_buf_sz); + + /* Data payload appended into SKB */ + page_pool_release_page(rx_q->page_pool, buf->page); + buf->page = NULL; + } + + if (buf2_len) { + dma_sync_single_for_cpu(priv->device, buf->sec_addr, + buf2_len, dma_dir); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf->sec_page, 0, buf2_len, + priv->dma_conf.dma_buf_sz); + + /* Data payload appended into SKB */ + page_pool_release_page(rx_q->page_pool, buf->sec_page); + buf->sec_page = NULL; + } + +drain_data: + if (likely(status & rx_not_ls)) + goto read_again; + if (!skb) + continue; + + /* Got entire packet into SKB. Finish it. */ + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rx_napi, skb); + skb = NULL; + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; + count++; + } + + if (status & rx_not_ls || skb) { + rx_q->state_saved = true; + rx_q->state.skb = skb; + rx_q->state.error = error; + rx_q->state.len = len; + } + + stmmac_finalize_xdp_rx(priv, xdp_status); + + stmmac_rx_refill(priv, queue); + + priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; + + return count; +} + +static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, rx_napi); + struct stmmac_priv *priv = ch->priv_data; + u32 chan = ch->index; + int work_done; + + BUG_ON(get_ecdev(priv)); + + priv->xstats.napi_poll++; + + work_done = stmmac_rx(priv, budget, chan); + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return work_done; +} + +static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, tx_napi); + struct stmmac_priv *priv = ch->priv_data; + u32 chan = ch->index; + int work_done; + + BUG_ON(get_ecdev(priv)); + + priv->xstats.napi_poll++; + + work_done = stmmac_tx_clean(priv, budget, chan); + work_done = min(work_done, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + 
stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return work_done; +} + +static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, rxtx_napi); + struct stmmac_priv *priv = ch->priv_data; + int rx_done, tx_done, rxtx_done; + u32 chan = ch->index; + + BUG_ON(get_ecdev(priv)); + + priv->xstats.napi_poll++; + + tx_done = stmmac_tx_clean(priv, budget, chan); + tx_done = min(tx_done, budget); + + rx_done = stmmac_rx_zc(priv, budget, chan); + + rxtx_done = max(tx_done, rx_done); + + /* If either TX or RX work is not complete, return budget + * and keep pooling + */ + if (rxtx_done >= budget) + return budget; + + /* all work done, exit the polling mode */ + if (napi_complete_done(napi, rxtx_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + /* Both RX and TX work done are compelte, + * so enable both RX & TX IRQs. + */ + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return min(rxtx_done, budget - 1); +} + +/** + * stmmac_tx_timeout + * @dev : Pointer to net device structure + * @txqueue: the index of the hanging transmit queue + * Description: this function is called when a packet transmission fails to + * complete within a reasonable time. The driver will mark the error in the + * netdev structure and arrange for the device to be reset to a sane state + * in order to transmit a new packet. + */ +static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + stmmac_global_err(priv); +} + +/** + * stmmac_set_rx_mode - entry point for multicast addressing + * @dev : pointer to the device structure + * Description: + * This function is a driver entry point which gets called by the kernel + * whenever multicast addresses must be enabled/disabled. + * Return value: + * void. + */ +static void stmmac_set_rx_mode(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + stmmac_set_filter(priv, priv->hw, dev); +} + +/** + * stmmac_change_mtu - entry point to change MTU size for the device. + * @dev : device pointer. + * @new_mtu : the new MTU size for the device. + * Description: the Maximum Transfer Unit (MTU) is used by the network layer + * to drive packet transmission. Ethernet has an MTU of 1500 octets + * (ETH_DATA_LEN). This value can be changed with ifconfig. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. 
+ */ +static int stmmac_change_mtu(struct net_device *dev, int new_mtu) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int txfifosz = priv->plat->tx_fifo_size; + struct stmmac_dma_conf *dma_conf; + const int mtu = new_mtu; + int ret; + + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + txfifosz /= priv->plat->tx_queues_to_use; + + if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); + return -EINVAL; + } + + new_mtu = STMMAC_ALIGN(new_mtu); + + /* If condition true, FIFO is too small or MTU too large */ + if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) + return -EINVAL; + + if (netif_running(dev)) { + netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); + /* Try to allocate the new DMA conf with the new mtu */ + dma_conf = stmmac_setup_dma_desc(priv, mtu); + if (IS_ERR(dma_conf)) { + netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", + mtu); + return PTR_ERR(dma_conf); + } + + stmmac_release(dev); + + ret = __stmmac_open(dev, dma_conf); + if (ret) { + free_dma_desc_resources(priv, dma_conf); + kfree(dma_conf); + netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); + return ret; + } + + kfree(dma_conf); + + stmmac_set_rx_mode(dev); + } + + dev->mtu = mtu; + netdev_update_features(dev); + + return 0; +} + +static netdev_features_t stmmac_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) + features &= ~NETIF_F_RXCSUM; + + if (!priv->plat->tx_coe) + features &= ~NETIF_F_CSUM_MASK; + + /* Some GMAC devices have a bugged Jumbo frame support that + * needs to have the Tx COE disabled for oversized frames + * (due to limited buffer sizes). In this case we disable + * the TX csum insertion in the TDES and not use SF. + */ + if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) + features &= ~NETIF_F_CSUM_MASK; + + /* Disable tso if asked by ethtool */ + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + if (features & NETIF_F_TSO) + priv->tso = true; + else + priv->tso = false; + } + + return features; +} + +static int stmmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + /* Keep the COE Type in case of csum is supporting */ + if (features & NETIF_F_RXCSUM) + priv->hw->rx_csum = priv->plat->rx_coe; + else + priv->hw->rx_csum = 0; + /* No check needed because rx_coe has been set before and it will be + * fixed in case of issue. 
+ */ + stmmac_rx_ipc(priv, priv->hw); + + if (priv->sph_cap) { + bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; + u32 chan; + + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } + + return 0; +} + +static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (status == FPE_EVENT_UNKNOWN || !*hs_enable) + return; + + /* If LP has sent verify mPacket, LP is FPE capable */ + if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { + if (*lp_state < FPE_STATE_CAPABLE) + *lp_state = FPE_STATE_CAPABLE; + + /* If user has requested FPE enable, quickly response */ + if (*hs_enable) + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, + MPACKET_RESPONSE); + } + + /* If Local has sent verify mPacket, Local is FPE capable */ + if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { + if (*lo_state < FPE_STATE_CAPABLE) + *lo_state = FPE_STATE_CAPABLE; + } + + /* If LP has sent response mPacket, LP is entering FPE ON */ + if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) + *lp_state = FPE_STATE_ENTERING_ON; + + /* If Local has sent response mPacket, Local is entering FPE ON */ + if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) + *lo_state = FPE_STATE_ENTERING_ON; + + if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && + !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && + priv->fpe_wq) { + queue_work(priv->fpe_wq, &priv->fpe_task); + } +} + +static void stmmac_common_interrupt(struct stmmac_priv *priv) +{ + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queues_count; + u32 queue; + bool xmac; + + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; + + if (priv->irq_wake) + pm_wakeup_event(priv->device, 0); + + if (priv->dma_cap.estsel) + stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, + &priv->xstats, tx_cnt); + + if (priv->dma_cap.fpesel) { + int status = stmmac_fpe_irq_status(priv, priv->ioaddr, + priv->dev); + + stmmac_fpe_event_status(priv, status); + } + + /* To handle GMAC own interrupts */ + if ((priv->plat->has_gmac) || xmac) { + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); + + if (unlikely(status)) { + /* For LPI we need to save the tx status */ + if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) + priv->tx_path_in_lpi_mode = true; + if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) + priv->tx_path_in_lpi_mode = false; + } + + for (queue = 0; queue < queues_count; queue++) { + status = stmmac_host_mtl_irq_status(priv, priv->hw, + queue); + } + + /* PCS link status */ + if (priv->hw->pcs) { + if (priv->xstats.pcs_link) + if (get_ecdev(priv)) { + ecdev_set_link(get_ecdev(priv), 1); + } else { + netif_carrier_on(priv->dev); + } + else + if (get_ecdev(priv)) { + ecdev_set_link(get_ecdev(priv), 0); + } else { + netif_carrier_off(priv->dev); + } + } + + stmmac_timestamp_interrupt(priv, priv); + } +} + +/** + * stmmac_interrupt - main ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the main driver interrupt service routine. 
+ * It can call: + * o DMA service routine (to manage incoming frame reception and transmission + * status) + * o Core interrupts to manage: remote wake-up, management counter, LPI + * interrupts. + */ +static irqreturn_t stmmac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + if (stmmac_safety_feat_interrupt(priv)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); + + /* To handle DMA interrupts */ + stmmac_dma_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + stmmac_safety_feat_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; + struct stmmac_dma_conf *dma_conf; + int chan = tx_q->queue_index; + struct stmmac_priv *priv; + int status; + + dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); + priv = container_of(dma_conf, struct stmmac_priv, dma_conf); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + status = stmmac_napi_check(priv, chan, DMA_DIR_TX); + + if (unlikely(status & tx_hard_error_bump_tc)) { + /* Try to bump up the dma threshold on this failure */ + stmmac_bump_dma_threshold(priv, chan); + } else if (unlikely(status == tx_hard_error)) { + stmmac_tx_err(priv, chan); + } + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) +{ + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; + struct stmmac_dma_conf *dma_conf; + int chan = rx_q->queue_index; + struct stmmac_priv *priv; + + dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); + priv = container_of(dma_conf, struct stmmac_priv, dma_conf); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + stmmac_napi_check(priv, chan, DMA_DIR_RX); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling receive - used by NETCONSOLE and other diagnostic tools + * to allow network I/O with interrupts disabled. 
+ */ +static void stmmac_poll_controller(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + /* If adapter is down, do nothing */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return; + + if (priv->plat->multi_msi_en) { + for (i = 0; i < priv->plat->rx_queues_to_use; i++) + stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); + + for (i = 0; i < priv->plat->tx_queues_to_use; i++) + stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); + } else { + disable_irq(dev->irq); + stmmac_interrupt(dev->irq, dev); + enable_irq(dev->irq); + } +} +#endif + +/** + * stmmac_ioctl - Entry point for the Ioctl + * @dev: Device pointer. + * @rq: An IOCTL specefic structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * @cmd: IOCTL command + * Description: + * Currently it supports the phy_mii_ioctl(...) and HW time stamping. + */ +static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct stmmac_priv *priv = netdev_priv (dev); + int ret = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + ret = phylink_mii_ioctl(priv->phylink, rq, cmd); + break; + case SIOCSHWTSTAMP: + ret = stmmac_hwtstamp_set(dev, rq); + break; + case SIOCGHWTSTAMP: + ret = stmmac_hwtstamp_get(dev, rq); + break; + default: + break; + } + + return ret; +} + +static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct stmmac_priv *priv = cb_priv; + int ret = -EOPNOTSUPP; + + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) + return ret; + + __stmmac_disable_all_queues(priv); + + switch (type) { + case TC_SETUP_CLSU32: + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); + break; + case TC_SETUP_CLSFLOWER: + ret = stmmac_tc_setup_cls(priv, priv, type_data); + break; + default: + break; + } + + stmmac_enable_all_queues(priv); + return ret; +} + +static LIST_HEAD(stmmac_block_cb_list); + +static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + if (get_ecdev(priv)) + return -EBUSY; + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &stmmac_block_cb_list, + stmmac_setup_tc_block_cb, + priv, priv, true); + case TC_SETUP_QDISC_CBS: + return stmmac_tc_setup_cbs(priv, priv, type_data); + case TC_SETUP_QDISC_TAPRIO: + return stmmac_tc_setup_taprio(priv, priv, type_data); + case TC_SETUP_QDISC_ETF: + return stmmac_tc_setup_etf(priv, priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + int gso = skb_shinfo(skb)->gso_type; + + if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { + /* + * There is no way to determine the number of TSO/USO + * capable Queues. Let's use always the Queue 0 + * because if TSO/USO is supported then at least this + * one will be capable. 
+ */ + return 0; + } + + return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; +} + +static int stmmac_set_mac_address(struct net_device *ndev, void *addr) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + int ret = 0; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + ret = eth_mac_addr(ndev, addr); + if (ret) + goto set_mac_error; + + stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); + +set_mac_error: + pm_runtime_put(priv->device); + + return ret; +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *stmmac_fs_dir; + +static void sysfs_display_ring(void *head, int size, int extend_desc, + struct seq_file *seq, dma_addr_t dma_phy_addr) +{ + int i; + struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + struct dma_desc *p = (struct dma_desc *)head; + dma_addr_t dma_addr; + + for (i = 0; i < size; i++) { + if (extend_desc) { + dma_addr = dma_phy_addr + i * sizeof(*ep); + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(ep->basic.des0), + le32_to_cpu(ep->basic.des1), + le32_to_cpu(ep->basic.des2), + le32_to_cpu(ep->basic.des3)); + ep++; + } else { + dma_addr = dma_phy_addr + i * sizeof(*p); + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; + } + seq_printf(seq, "\n"); + } +} + +static int stmmac_rings_status_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + + if ((dev->flags & IFF_UP) == 0) + return 0; + + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + + seq_printf(seq, "RX Queue %d:\n", queue); + + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_erx, + priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); + } else { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_rx, + priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); + } + } + + for (queue = 0; queue < tx_count; queue++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + + seq_printf(seq, "TX Queue %d:\n", queue); + + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_etx, + priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); + } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_tx, + priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); + } + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); + +static int stmmac_dma_cap_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->hw_cap_support) { + seq_printf(seq, "DMA HW features not supported\n"); + return 0; + } + + seq_printf(seq, "==============================\n"); + seq_printf(seq, "\tDMA HW features\n"); + seq_printf(seq, "==============================\n"); + + seq_printf(seq, "\t10/100 Mbps: %s\n", + (priv->dma_cap.mbps_10_100) ? "Y" : "N"); + seq_printf(seq, "\t1000 Mbps: %s\n", + (priv->dma_cap.mbps_1000) ? "Y" : "N"); + seq_printf(seq, "\tHalf duplex: %s\n", + (priv->dma_cap.half_duplex) ? 
"Y" : "N"); + seq_printf(seq, "\tHash Filter: %s\n", + (priv->dma_cap.hash_filter) ? "Y" : "N"); + seq_printf(seq, "\tMultiple MAC address registers: %s\n", + (priv->dma_cap.multi_addr) ? "Y" : "N"); + seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", + (priv->dma_cap.pcs) ? "Y" : "N"); + seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", + (priv->dma_cap.sma_mdio) ? "Y" : "N"); + seq_printf(seq, "\tPMT Remote wake up: %s\n", + (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); + seq_printf(seq, "\tPMT Magic Frame: %s\n", + (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); + seq_printf(seq, "\tRMON module: %s\n", + (priv->dma_cap.rmon) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", + (priv->dma_cap.time_stamp) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", + (priv->dma_cap.atime_stamp) ? "Y" : "N"); + seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", + (priv->dma_cap.eee) ? "Y" : "N"); + seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); + seq_printf(seq, "\tChecksum Offload in TX: %s\n", + (priv->dma_cap.tx_coe) ? "Y" : "N"); + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", + (priv->dma_cap.rx_coe) ? "Y" : "N"); + } else { + seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", + (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", + (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + } + seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", + (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); + seq_printf(seq, "\tNumber of Additional RX channel: %d\n", + priv->dma_cap.number_rx_channel); + seq_printf(seq, "\tNumber of Additional TX channel: %d\n", + priv->dma_cap.number_tx_channel); + seq_printf(seq, "\tNumber of Additional RX queues: %d\n", + priv->dma_cap.number_rx_queues); + seq_printf(seq, "\tNumber of Additional TX queues: %d\n", + priv->dma_cap.number_tx_queues); + seq_printf(seq, "\tEnhanced descriptors: %s\n", + (priv->dma_cap.enh_desc) ? "Y" : "N"); + seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); + seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); + seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); + seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); + seq_printf(seq, "\tNumber of PPS Outputs: %d\n", + priv->dma_cap.pps_out_num); + seq_printf(seq, "\tSafety Features: %s\n", + priv->dma_cap.asp ? "Y" : "N"); + seq_printf(seq, "\tFlexible RX Parser: %s\n", + priv->dma_cap.frpsel ? "Y" : "N"); + seq_printf(seq, "\tEnhanced Addressing: %d\n", + priv->dma_cap.host_dma_width); + seq_printf(seq, "\tReceive Side Scaling: %s\n", + priv->dma_cap.rssen ? "Y" : "N"); + seq_printf(seq, "\tVLAN Hash Filtering: %s\n", + priv->dma_cap.vlhash ? "Y" : "N"); + seq_printf(seq, "\tSplit Header: %s\n", + priv->dma_cap.sphen ? "Y" : "N"); + seq_printf(seq, "\tVLAN TX Insertion: %s\n", + priv->dma_cap.vlins ? "Y" : "N"); + seq_printf(seq, "\tDouble VLAN: %s\n", + priv->dma_cap.dvlan ? "Y" : "N"); + seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", + priv->dma_cap.l3l4fnum); + seq_printf(seq, "\tARP Offloading: %s\n", + priv->dma_cap.arpoffsel ? "Y" : "N"); + seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", + priv->dma_cap.estsel ? "Y" : "N"); + seq_printf(seq, "\tFrame Preemption (FPE): %s\n", + priv->dma_cap.fpesel ? "Y" : "N"); + seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", + priv->dma_cap.tbssel ? 
"Y" : "N"); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); + +/* Use network device events to rename debugfs file entries. + */ +static int stmmac_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct stmmac_priv *priv = netdev_priv(dev); + + if (dev->netdev_ops != &stmmac_netdev_ops) + goto done; + + switch (event) { + case NETDEV_CHANGENAME: + if (priv->dbgfs_dir) + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, + priv->dbgfs_dir, + stmmac_fs_dir, + dev->name); + break; + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block stmmac_notifier = { + .notifier_call = stmmac_device_event, +}; + +static void stmmac_init_fs(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + rtnl_lock(); + + /* Create per netdev entries */ + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); + + /* Entry to report DMA RX/TX rings */ + debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, + &stmmac_rings_status_fops); + + /* Entry to report the DMA HW features */ + debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, + &stmmac_dma_cap_fops); + + rtnl_unlock(); +} + +static void stmmac_exit_fs(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + debugfs_remove_recursive(priv->dbgfs_dir); +} +#endif /* CONFIG_DEBUG_FS */ + +static u32 stmmac_vid_crc32_le(__le16 vid_le) +{ + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + u32 crc = ~0x0; + u32 temp = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= 0xedb88320; + } + + return crc; +} + +static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) +{ + u32 crc, hash = 0; + __le16 pmatch = 0; + int count = 0; + u16 vid = 0; + + for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { + __le16 vid_le = cpu_to_le16(vid); + crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; + hash |= (1 << crc); + count++; + } + + if (!priv->dma_cap.vlhash) { + if (count > 2) /* VID = 0 always passes filter */ + return -EOPNOTSUPP; + + pmatch = cpu_to_le16(vid); + hash = 0; + } + + return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); +} + +static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + bool is_double = false; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + if (be16_to_cpu(proto) == ETH_P_8021AD) + is_double = true; + + set_bit(vid, priv->active_vlans); + ret = stmmac_vlan_update(priv, is_double); + if (ret) { + clear_bit(vid, priv->active_vlans); + goto err_pm_put; + } + + if (priv->hw->num_vlan) { + ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); + if (ret) + goto err_pm_put; + } +err_pm_put: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + bool is_double = false; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + if (be16_to_cpu(proto) == ETH_P_8021AD) + is_double = true; + + clear_bit(vid, priv->active_vlans); + + if (priv->hw->num_vlan) { + ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, 
vid); + if (ret) + goto del_vlan_error; + } + + ret = stmmac_vlan_update(priv, is_double); + +del_vlan_error: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (get_ecdev(priv)) { + return -EBUSY; + } + + switch (bpf->command) { + case XDP_SETUP_PROG: + return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int i, nxmit = 0; + int queue; + + if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with slow path */ + ec_txq_trans_cond_update(nq); + + for (i = 0; i < num_frames; i++) { + int res; + + res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); + if (res == STMMAC_XDP_CONSUMED) + break; + + nxmit++; + } + + if (flags & XDP_XMIT_FLUSH) { + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); + } + + __netif_tx_unlock(nq); + + return nxmit; +} + +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_rx_dma(priv, queue); + __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); +} + +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + u32 buf_size; + int ret; + + ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc RX desc.\n"); + return; + } + + ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); + if (ret) { + __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); + netdev_err(priv->dev, "Failed to init RX desc.\n"); + return; + } + + stmmac_reset_rx_queue(priv, queue); + stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, rx_q->queue_index); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, rx_q->queue_index); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_conf.dma_buf_sz, + rx_q->queue_index); + } + + stmmac_start_rx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); +} + +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + 
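A pattern worth noting across the EtherCAT-specific hunks above: while the master has claimed the NIC (get_ecdev(priv) != NULL), stack-facing entry points such as ndo_setup_tc, ndo_bpf and ndo_xsk_wakeup bail out with -EBUSY, received frames are handed to ecdev_receive() instead of the NAPI/GRO path, and link changes go through ecdev_set_link() rather than netif_carrier_on/off(). The following is a minimal, self-contained sketch of that gating decision only; the stub types and helper names (fake_priv, stack_entry_point, rx_dispatch) are illustrative assumptions and are not part of this patch — only the get_ecdev() check, the -EBUSY result and the "EtherCAT vs. kernel stack" fork mirror the driver code above.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Stub stand-ins for the driver/EtherCAT types used in this sketch. */
struct ec_device { int unused; };          /* models ec_device_t */
struct fake_priv {
	struct ec_device *ecdev;           /* non-NULL while claimed by EtherCAT */
};

static struct ec_device *get_ecdev(struct fake_priv *priv)
{
	return priv->ecdev;
}

/* Models the -EBUSY gating used by ndo_setup_tc, ndo_bpf and ndo_xsk_wakeup. */
static int stack_entry_point(struct fake_priv *priv)
{
	if (get_ecdev(priv))
		return -EBUSY;             /* the stack may not reconfigure the NIC */
	return 0;                          /* normal netdev path */
}

/* Models the RX fork: EtherCAT gets the frame directly, else NAPI/GRO. */
static void rx_dispatch(struct fake_priv *priv, const void *data, size_t len)
{
	(void)data;                        /* payload not inspected in this sketch */
	if (get_ecdev(priv))
		printf("ecdev_receive(%zu bytes)\n", len);
	else
		printf("napi_gro_receive(%zu bytes)\n", len);
}

int main(void)
{
	struct ec_device ec_dummy = { 0 };
	struct fake_priv claimed = { .ecdev = &ec_dummy };
	struct fake_priv plain   = { .ecdev = NULL };
	unsigned char frame[60]  = { 0 };

	printf("claimed: %d\n", stack_entry_point(&claimed));  /* -EBUSY */
	printf("plain:   %d\n", stack_entry_point(&plain));    /* 0 */
	rx_dispatch(&claimed, frame, sizeof(frame));
	rx_dispatch(&plain, frame, sizeof(frame));
	return 0;
}

The same decision also explains why the TX completion and RX refill paths above skip netdev_tx_sent_queue()/dev_kfree_skb() when get_ecdev(priv) is set: buffer ownership and pacing are handled from the EtherCAT master cycle (ec_poll) rather than by the kernel's queue bookkeeping.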
+ spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_tx_dma(priv, queue); + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); +} + +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + int ret; + + ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc TX desc.\n"); + return; + } + + ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); + if (ret) { + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); + netdev_err(priv->dev, "Failed to init TX desc.\n"); + return; + } + + stmmac_reset_tx_queue(priv, queue); + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, tx_q->queue_index); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, tx_q->queue_index); + + stmmac_start_tx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); +} + +void stmmac_xdp_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; + + /* Ensure tx function is not running */ + netif_tx_disable(dev); + + BUG_ON(get_ecdev(priv)); + + /* Disable NAPI process */ + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + /* Free the IRQ lines */ + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); + + /* Stop TX/RX DMA channels */ + stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv, &priv->dma_conf); + + /* Disable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, false); + + /* set trans_start so we don't get spurious + * watchdogs during reset + */ + netif_trans_update(dev); + netif_carrier_off(dev); +} + +int stmmac_xdp_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_cnt, tx_cnt); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + u32 buf_size; + bool sph_en; + u32 chan; + int ret; + + BUG_ON(get_ecdev(priv)); + + ret = alloc_dma_desc_resources(priv, &priv->dma_conf); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto dma_desc_error; + } + + ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } + + stmmac_reset_queues_param(priv); + + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) { + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } + + /* Adjust Split header */ + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_cnt; chan++) { + rx_q = &priv->dma_conf.rx_queue[chan]; + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + 
rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_conf.dma_buf_sz, + rx_q->queue_index); + } + + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } + + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_cnt; chan++) { + tx_q = &priv->dma_conf.tx_queue[chan]; + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); + + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx_q->txtimer.function = stmmac_tx_timer; + } + + /* Enable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, true); + + /* Start Rx & Tx DMA Channels */ + stmmac_start_all_dma(priv); + + ret = stmmac_request_irq(dev); + if (ret) + goto irq_error; + + /* Enable NAPI process*/ + stmmac_enable_all_queues(priv); + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + stmmac_enable_all_dma_irq(priv); + + return 0; + +irq_error: + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + stmmac_hw_teardown(dev); +init_error: + free_dma_desc_resources(priv, &priv->dma_conf); +dma_desc_error: + return ret; +} + +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + struct stmmac_channel *ch; + + if (get_ecdev(priv)) { + return -EBUSY; + } + + if (test_bit(STMMAC_DOWN, &priv->state) || + !netif_carrier_ok(priv->dev)) + return -ENETDOWN; + + if (!stmmac_xdp_is_enabled(priv)) + return -EINVAL; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + rx_q = &priv->dma_conf.rx_queue[queue]; + tx_q = &priv->dma_conf.tx_queue[queue]; + ch = &priv->channel[queue]; + + if (!rx_q->xsk_pool && !tx_q->xsk_pool) + return -EINVAL; + + if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { + /* EQoS does not have per-DMA channel SW interrupt, + * so we schedule RX Napi straight-away. 
+ */ + if (likely(napi_schedule_prep(&ch->rxtx_napi))) + __napi_schedule(&ch->rxtx_napi); + } + + return 0; +} + +static const struct net_device_ops stmmac_netdev_ops = { + .ndo_open = stmmac_open, + .ndo_start_xmit = stmmac_xmit, + .ndo_stop = stmmac_release, + .ndo_change_mtu = stmmac_change_mtu, + .ndo_fix_features = stmmac_fix_features, + .ndo_set_features = stmmac_set_features, + .ndo_set_rx_mode = stmmac_set_rx_mode, + .ndo_tx_timeout = stmmac_tx_timeout, + .ndo_eth_ioctl = stmmac_ioctl, + .ndo_setup_tc = stmmac_setup_tc, + .ndo_select_queue = stmmac_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = stmmac_poll_controller, +#endif + .ndo_set_mac_address = stmmac_set_mac_address, + .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, + .ndo_bpf = stmmac_bpf, + .ndo_xdp_xmit = stmmac_xdp_xmit, + .ndo_xsk_wakeup = stmmac_xsk_wakeup, +}; + +static void ec_kick_watchdog(struct irq_work *work) +{ + struct stmmac_priv *priv = + container_of(work, struct stmmac_priv, ec_watchdog_kicker); + stmmac_common_interrupt(priv); +} + +/** + * ec_poll - EtherCAT poll routine + * @netdev: net device structure + * + * This function can never fail. + * + **/ +void ec_poll(struct net_device *netdev) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + int i; + int budget = 128; + u32 maxq; + + if (!get_ecdev(priv)) + return; + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); + + if (jiffies - priv->ec_watchdog_jiffies >= 2 * HZ) { + priv->ec_watchdog_jiffies = jiffies; + irq_work_queue(&priv->ec_watchdog_kicker); + } + + for (i = 0; i < maxq; i++) { + struct stmmac_channel *ch = &priv->channel[i]; + struct stmmac_priv *priv = ch->priv_data; + u32 chan = ch->index; + int work_done; + + work_done = stmmac_rx(priv, budget, chan); + work_done = stmmac_tx_clean(priv, budget, chan); + } +} + +static void stmmac_reset_subtask(struct stmmac_priv *priv) +{ + if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) + return; + if (test_bit(STMMAC_DOWN, &priv->state)) + return; + + netdev_err(priv->dev, "Reset adapter.\n"); + + rtnl_lock(); + netif_trans_update(priv->dev); + while (test_and_set_bit(STMMAC_RESETING, &priv->state)) + usleep_range(1000, 2000); + + set_bit(STMMAC_DOWN, &priv->state); + dev_close(priv->dev); + dev_open(priv->dev, NULL); + clear_bit(STMMAC_DOWN, &priv->state); + clear_bit(STMMAC_RESETING, &priv->state); + rtnl_unlock(); +} + +static void stmmac_service_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + service_task); + + stmmac_reset_subtask(priv); + clear_bit(STMMAC_SERVICE_SCHED, &priv->state); +} + +/** + * stmmac_hw_init - Init the MAC device + * @priv: driver private structure + * Description: this function is to configure the MAC device according to + * some platform parameters or the HW capability register. It prepares the + * driver to use either ring or chain modes and to setup either enhanced or + * normal descriptors. 
+ */ +static int stmmac_hw_init(struct stmmac_priv *priv) +{ + int ret; + + /* dwmac-sun8i only work in chain mode */ + if (priv->plat->has_sun8i) + chain_mode = 1; + priv->chain_mode = chain_mode; + + /* Initialize HW Interface */ + ret = stmmac_hwif_init(priv); + if (ret) + return ret; + + /* Get the HW capability (new GMAC newer than 3.50a) */ + priv->hw_cap_support = stmmac_get_hw_features(priv); + if (priv->hw_cap_support) { + dev_info(priv->device, "DMA HW capability register supported\n"); + + /* We can override some gmac/dma configuration fields: e.g. + * enh_desc, tx_coe (e.g. that are passed through the + * platform) with the values from the HW capability + * register (if supported). + */ + priv->plat->enh_desc = priv->dma_cap.enh_desc; + priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && + !priv->plat->use_phy_wol; + priv->hw->pmt = priv->plat->pmt; + if (priv->dma_cap.hash_tb_sz) { + priv->hw->multicast_filter_bins = + (BIT(priv->dma_cap.hash_tb_sz) << 5); + priv->hw->mcast_bits_log2 = + ilog2(priv->hw->multicast_filter_bins); + } + + /* TXCOE doesn't work in thresh DMA mode */ + if (priv->plat->force_thresh_dma_mode) + priv->plat->tx_coe = 0; + else + priv->plat->tx_coe = priv->dma_cap.tx_coe; + + /* In case of GMAC4 rx_coe is from HW cap register. */ + priv->plat->rx_coe = priv->dma_cap.rx_coe; + + if (priv->dma_cap.rx_coe_type2) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; + else if (priv->dma_cap.rx_coe_type1) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; + + } else { + dev_info(priv->device, "No HW DMA feature register supported\n"); + } + + if (priv->plat->rx_coe) { + priv->hw->rx_csum = priv->plat->rx_coe; + dev_info(priv->device, "RX Checksum Offload Engine supported\n"); + if (priv->synopsys_id < DWMAC_CORE_4_00) + dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); + } + if (priv->plat->tx_coe) + dev_info(priv->device, "TX Checksum insertion supported\n"); + + if (priv->plat->pmt) { + dev_info(priv->device, "Wake-Up On Lan supported\n"); + device_set_wakeup_capable(priv->device, 1); + } + + if (priv->dma_cap.tsoen) + dev_info(priv->device, "TSO supported\n"); + + priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; + priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; + + /* Run HW quirks, if any */ + if (priv->hwif_quirks) { + ret = priv->hwif_quirks(priv); + if (ret) + return ret; + } + + /* Rx Watchdog is available in the COREs newer than the 3.40. + * In some case, for example on bugged HW this feature + * has to be disable and this can be done by passing the + * riwt_off field from the platform. 
+ */ + if (((priv->synopsys_id >= DWMAC_CORE_3_50) || + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { + priv->use_riwt = 1; + dev_info(priv->device, + "Enable RX Mitigation via HW Watchdog Timer\n"); + } + + return 0; +} + +static void stmmac_napi_add(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 queue, maxq; + + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + ch->priv_data = priv; + ch->index = queue; + spin_lock_init(&ch->lock); + + if (queue < priv->plat->rx_queues_to_use) { + netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); + } + if (queue < priv->plat->tx_queues_to_use) { + netif_napi_add_tx(dev, &ch->tx_napi, + stmmac_napi_poll_tx); + } + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_add(dev, &ch->rxtx_napi, + stmmac_napi_poll_rxtx); + } + } +} + +static void stmmac_napi_del(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 queue, maxq; + + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + if (queue < priv->plat->rx_queues_to_use) + netif_napi_del(&ch->rx_napi); + if (queue < priv->plat->tx_queues_to_use) + netif_napi_del(&ch->tx_napi); + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_del(&ch->rxtx_napi); + } + } +} + +int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0, i; + + if (netif_running(dev)) + stmmac_release(dev); + + stmmac_napi_del(dev); + + priv->plat->rx_queues_to_use = rx_cnt; + priv->plat->tx_queues_to_use = tx_cnt; + if (!netif_is_rxfh_configured(dev)) + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = ethtool_rxfh_indir_default(i, + rx_cnt); + + stmmac_napi_add(dev); + + if (netif_running(dev)) + ret = stmmac_open(dev); + + return ret; +} + +int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + if (netif_running(dev)) + stmmac_release(dev); + + priv->dma_conf.dma_rx_size = rx_size; + priv->dma_conf.dma_tx_size = tx_size; + + if (netif_running(dev)) + ret = stmmac_open(dev); + + return ret; +} + +#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" +static void stmmac_fpe_lp_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + fpe_task); + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + bool *enable = &fpe_cfg->enable; + int retries = 20; + + while (retries-- > 0) { + /* Bail out immediately if FPE handshake is OFF */ + if (*lo_state == FPE_STATE_OFF || !*hs_enable) + break; + + if (*lo_state == FPE_STATE_ENTERING_ON && + *lp_state == FPE_STATE_ENTERING_ON) { + stmmac_fpe_configure(priv, priv->ioaddr, + fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + *enable); + + netdev_info(priv->dev, "configured FPE\n"); + + *lo_state = FPE_STATE_ON; + *lp_state = FPE_STATE_ON; + netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); + break; + } + + if ((*lo_state == FPE_STATE_CAPABLE || + *lo_state == FPE_STATE_ENTERING_ON) && + *lp_state != FPE_STATE_ON) { + netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, + *lo_state, *lp_state); + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, + MPACKET_VERIFY); + } + /* Sleep then retry */ + msleep(500); + } + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); +} + +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) +{ + if (priv->plat->fpe_cfg->hs_enable != enable) { + if (enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + priv->plat->fpe_cfg, + MPACKET_VERIFY); + } else { + priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; + priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; + } + + priv->plat->fpe_cfg->hs_enable = enable; + } +} + +/** + * stmmac_ec_dvr_probe + * @device: device pointer + * @plat_dat: platform data pointer + * @res: stmmac resource pointer + * Description: this is the main probe function used to + * call the alloc_etherdev, allocate the priv structure. + * Return: + * returns 0 on success, otherwise errno. + */ +int stmmac_ec_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *res) +{ + struct net_device *ndev = NULL; + struct stmmac_priv *priv; + u32 rxq; + int i, ret = 0; + + ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), + MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, device); + + priv = netdev_priv(ndev); + priv->device = device; + priv->dev = ndev; + priv->ecdev_ = NULL; + priv->ecdev_initialized = false; + + stmmac_set_ethtool_ops(ndev); + priv->pause = pause; + priv->plat = plat_dat; + priv->ioaddr = res->addr; + priv->dev->base_addr = (unsigned long)res->addr; + priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; + + priv->dev->irq = res->irq; + priv->wol_irq = res->wol_irq; + priv->lpi_irq = res->lpi_irq; + priv->sfty_ce_irq = res->sfty_ce_irq; + priv->sfty_ue_irq = res->sfty_ue_irq; + for (i = 0; i < MTL_MAX_RX_QUEUES; i++) + priv->rx_irq[i] = res->rx_irq[i]; + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) + priv->tx_irq[i] = res->tx_irq[i]; + + if (!is_zero_ether_addr(res->mac)) + eth_hw_addr_set(priv->dev, res->mac); + + dev_set_drvdata(device, priv->dev); + + /* Verify driver arguments */ + stmmac_verify_args(); + + priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); + if (!priv->af_xdp_zc_qps) + return -ENOMEM; + + /* Allocate workqueue */ + priv->wq = create_singlethread_workqueue("stmmac_wq"); + if (!priv->wq) { + dev_err(priv->device, "failed to create workqueue\n"); + ret = -ENOMEM; + goto error_wq_init; + } + + INIT_WORK(&priv->service_task, stmmac_service_task); + + /* Initialize Link Partner FPE workqueue */ + INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); + + /* Override with kernel parameters if supplied XXX CRS XXX + * this needs to have multiple instances + */ + if ((phyaddr >= 0) && (phyaddr <= 31)) + priv->plat->phy_addr = phyaddr; + + if (priv->plat->stmmac_rst) { + ret = reset_control_assert(priv->plat->stmmac_rst); + reset_control_deassert(priv->plat->stmmac_rst); + /* Some reset controllers have only reset callback instead of + * assert + deassert callbacks pair. 
+ */ + if (ret == -ENOTSUPP) + reset_control_reset(priv->plat->stmmac_rst); + } + + ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); + if (ret == -ENOTSUPP) + dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", + ERR_PTR(ret)); + + /* Wait a bit for the reset to take effect */ + udelay(10); + + /* Init MAC and get the capabilities */ + ret = stmmac_hw_init(priv); + if (ret) + goto error_hw_init; + + /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. + */ + if (priv->synopsys_id < DWMAC_CORE_5_20) + priv->plat->dma_cfg->dche = false; + + stmmac_check_ether_addr(priv); + + ndev->netdev_ops = &stmmac_netdev_ops; + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + + ret = stmmac_tc_init(priv, priv); + if (!ret) { + ndev->hw_features |= NETIF_F_HW_TC; + } + + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (priv->plat->has_gmac4) + ndev->hw_features |= NETIF_F_GSO_UDP_L4; + priv->tso = true; + dev_info(priv->device, "TSO feature enabled\n"); + } + + if (priv->dma_cap.sphen && !priv->plat->sph_disable) { + ndev->hw_features |= NETIF_F_GRO; + priv->sph_cap = true; + priv->sph = priv->sph_cap; + dev_info(priv->device, "SPH feature enabled\n"); + } + + /* Ideally our host DMA address width is the same as for the + * device. However, it may differ and then we have to use our + * host DMA width for allocation and the device DMA width for + * register handling. + */ + if (priv->plat->host_dma_width) + priv->dma_cap.host_dma_width = priv->plat->host_dma_width; + else + priv->dma_cap.host_dma_width = priv->dma_cap.addr64; + + if (priv->dma_cap.host_dma_width) { + ret = dma_set_mask_and_coherent(device, + DMA_BIT_MASK(priv->dma_cap.host_dma_width)); + if (!ret) { + dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", + priv->dma_cap.host_dma_width, priv->dma_cap.addr64); + + /* + * If more than 32 bits can be addressed, make sure to + * enable enhanced addressing mode. 
+ */ + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + priv->plat->dma_cfg->eame = true; + } else { + ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); + if (ret) { + dev_err(priv->device, "Failed to set DMA Mask\n"); + goto error_hw_init; + } + + priv->dma_cap.host_dma_width = 32; + } + } + + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); +#ifdef STMMAC_VLAN_TAG_USED + /* Both mac100 and gmac support receive VLAN tag detection */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; + if (priv->dma_cap.vlhash) { + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + } + if (priv->dma_cap.vlins) { + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; + if (priv->dma_cap.dvlan) + ndev->features |= NETIF_F_HW_VLAN_STAG_TX; + } +#endif + priv->msg_enable = netif_msg_init(debug, default_msg_level); + + /* Initialize RSS */ + rxq = priv->plat->rx_queues_to_use; + netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); + + if (priv->dma_cap.rssen && priv->plat->rss_en) + ndev->features |= NETIF_F_RXHASH; + + /* MTU range: 46 - hw-specific max */ + ndev->min_mtu = ETH_ZLEN - ETH_HLEN; + if (priv->plat->has_xgmac) + ndev->max_mtu = XGMAC_JUMBO_LEN; + else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) + ndev->max_mtu = JUMBO_LEN; + else + ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); + /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu + * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. + */ + if ((priv->plat->maxmtu < ndev->max_mtu) && + (priv->plat->maxmtu >= ndev->min_mtu)) + ndev->max_mtu = priv->plat->maxmtu; + else if (priv->plat->maxmtu < ndev->min_mtu) + dev_warn(priv->device, + "%s: warning: maxmtu having invalid value (%d)\n", + __func__, priv->plat->maxmtu); + + if (flow_ctrl) + priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ + + /* Setup channels NAPI */ + stmmac_napi_add(ndev); + + mutex_init(&priv->lock); + + /* If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be + * changed at run-time and it is fixed. Viceversa the driver'll try to + * set the MDC clock dynamically according to the csr actual + * clock input. 
+	 */
+	if (priv->plat->clk_csr >= 0)
+		priv->clk_csr = priv->plat->clk_csr;
+	else
+		stmmac_clk_csr_set(priv);
+
+	stmmac_check_pcs_mode(priv);
+
+	pm_runtime_get_noresume(device);
+	pm_runtime_set_active(device);
+	if (!pm_runtime_enabled(device))
+		pm_runtime_enable(device);
+
+	if (priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI) {
+		/* MDIO bus Registration */
+		ret = stmmac_mdio_register(ndev);
+		if (ret < 0) {
+			dev_err_probe(priv->device, ret,
+				      "%s: MDIO bus (id: %d) registration failed\n",
+				      __func__, priv->plat->bus_id);
+			goto error_mdio_register;
+		}
+	}
+
+	if (priv->plat->speed_mode_2500)
+		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
+
+	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
+		ret = stmmac_xpcs_setup(priv->mii);
+		if (ret)
+			goto error_xpcs_setup;
+	}
+
+	ret = stmmac_phy_setup(priv);
+	if (ret) {
+		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
+		goto error_phy_setup;
+	}
+
+	priv->ecdev_ = ecdev_offer(ndev, ec_poll, THIS_MODULE);
+	priv->ecdev_initialized = true;
+	if (!get_ecdev(priv)) {
+		ret = register_netdev(ndev);
+		if (ret) {
+			dev_err(priv->device, "%s: ERROR %i registering the device\n",
+				__func__, ret);
+			goto error_netdev_register;
+		}
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	stmmac_init_fs(ndev);
+#endif
+
+	if (priv->plat->dump_debug_regs)
+		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
+	/* Let pm_runtime_put() disable the clocks.
+	 * If CONFIG_PM is not enabled, the clocks will stay powered.
+	 */
+	pm_runtime_put(device);
+	if (get_ecdev(priv)) {
+		init_irq_work(&priv->ec_watchdog_kicker, ec_kick_watchdog);
+		ret = ecdev_open(get_ecdev(priv));
+		if (ret) {
+			ecdev_withdraw(get_ecdev(priv));
+			goto error_netdev_register;
+		}
+	}
+
+	return ret;
+
+error_netdev_register:
+	phylink_destroy(priv->phylink);
+error_xpcs_setup:
+error_phy_setup:
+	if (priv->hw->pcs != STMMAC_PCS_TBI &&
+	    priv->hw->pcs != STMMAC_PCS_RTBI)
+		stmmac_mdio_unregister(ndev);
+error_mdio_register:
+	stmmac_napi_del(ndev);
+error_hw_init:
+	destroy_workqueue(priv->wq);
+error_wq_init:
+	bitmap_free(priv->af_xdp_zc_qps);
+
+	return ret;
+}
+
+/**
+ * stmmac_ec_dvr_remove
+ * @dev: device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX
+ * changes the link status, releases the DMA descriptor rings.
+ */ +int stmmac_ec_dvr_remove(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + netdev_info(priv->dev, "%s: removing driver", __func__); + + pm_runtime_get_sync(dev); + + stmmac_stop_all_dma(priv); + stmmac_mac_set(priv, priv->ioaddr, false); + if (get_ecdev(priv)) { + ecdev_close(get_ecdev(priv)); + irq_work_sync(&priv->ec_watchdog_kicker); + ecdev_withdraw(get_ecdev(priv)); + priv->ecdev_ = NULL; + priv->ecdev_initialized = false; + } else { + netif_carrier_off(ndev); + unregister_netdev(ndev); + } + +#ifdef CONFIG_DEBUG_FS + stmmac_exit_fs(ndev); +#endif + phylink_destroy(priv->phylink); + priv->phylink = NULL; + if (priv->plat->stmmac_rst) + reset_control_assert(priv->plat->stmmac_rst); + reset_control_assert(priv->plat->stmmac_ahb_rst); + if (priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); + destroy_workqueue(priv->wq); + mutex_destroy(&priv->lock); + bitmap_free(priv->af_xdp_zc_qps); + + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + + return 0; +} + +/** + * stmmac_suspend - suspend callback + * @dev: device pointer + * Description: this is the function to suspend the device and it is called + * by the platform driver to stop the network queue, release the resources, + * program the PMT register (for WoL), clean and release driver resources. + */ +int stmmac_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + u32 chan; + if (get_ecdev(priv)) { + return -EBUSY; + } + + if (!ndev || !netif_running(ndev)) + return 0; + + mutex_lock(&priv->lock); + + netif_device_detach(ndev); + + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + + /* Stop TX/RX DMA */ + stmmac_stop_all_dma(priv); + + if (priv->plat->serdes_powerdown) + priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); + + /* Enable Power down mode by programming the PMT regs */ + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + stmmac_pmt(priv, priv->hw, priv->wolopts); + priv->irq_wake = 1; + } else { + stmmac_mac_set(priv, priv->ioaddr, false); + pinctrl_pm_select_sleep_state(priv->device); + } + + mutex_unlock(&priv->lock); + + rtnl_lock(); + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + phylink_suspend(priv->phylink, true); + } else { + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); + phylink_suspend(priv->phylink, false); + } + rtnl_unlock(); + + if (priv->dma_cap.fpesel) { + /* Disable FPE */ + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, false); + + stmmac_fpe_handshake(priv, false); + stmmac_fpe_stop_wq(priv); + } + + priv->speed = SPEED_UNKNOWN; + return 0; +} + +static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + + rx_q->cur_rx = 0; + rx_q->dirty_rx = 0; +} + +static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + + tx_q->cur_tx = 0; + tx_q->dirty_tx = 0; + tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); +} + +/** + * stmmac_reset_queues_param 
- reset queue parameters + * @priv: device pointer + */ +static void stmmac_reset_queues_param(struct stmmac_priv *priv) +{ + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < rx_cnt; queue++) + stmmac_reset_rx_queue(priv, queue); + + for (queue = 0; queue < tx_cnt; queue++) + stmmac_reset_tx_queue(priv, queue); +} + +/** + * stmmac_resume - resume callback + * @dev: device pointer + * Description: when resume this function is invoked to setup the DMA and CORE + * in a usable state. + */ +int stmmac_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (get_ecdev(priv)) { + return -EBUSY; + } + if (!netif_running(ndev)) + return 0; + + /* Power Down bit, into the PM register, is cleared + * automatically as soon as a magic packet or a Wake-up frame + * is received. Anyway, it's better to manually clear + * this bit because it can generate problems while resuming + * from another devices (e.g. serial console). + */ + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + mutex_lock(&priv->lock); + stmmac_pmt(priv, priv->hw, 0); + mutex_unlock(&priv->lock); + priv->irq_wake = 0; + } else { + pinctrl_pm_select_default_state(priv->device); + /* reset the phy so that it's ready */ + if (priv->mii) + stmmac_mdio_reset(priv->mii); + } + + if (priv->plat->serdes_powerup) { + ret = priv->plat->serdes_powerup(ndev, + priv->plat->bsp_priv); + + if (ret < 0) + return ret; + } + + rtnl_lock(); + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + phylink_resume(priv->phylink); + } else { + phylink_resume(priv->phylink); + if (device_may_wakeup(priv->device)) + phylink_speed_up(priv->phylink); + } + rtnl_unlock(); + + rtnl_lock(); + mutex_lock(&priv->lock); + + stmmac_reset_queues_param(priv); + + stmmac_free_tx_skbufs(priv); + stmmac_clear_descriptors(priv, &priv->dma_conf); + + stmmac_hw_setup(ndev, false); + stmmac_init_coalesce(priv); + stmmac_set_rx_mode(ndev); + + stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); + + stmmac_enable_all_queues(priv); + stmmac_enable_all_dma_irq(priv); + + mutex_unlock(&priv->lock); + rtnl_unlock(); + + netif_device_attach(ndev); + + return 0; +} + +#ifndef MODULE +static int __init stmmac_cmdline_opt(char *str) +{ + char *opt; + + if (!str || !*str) + return 1; + while ((opt = strsep(&str, ",")) != NULL) { + if (!strncmp(opt, "debug:", 6)) { + if (kstrtoint(opt + 6, 0, &debug)) + goto err; + } else if (!strncmp(opt, "phyaddr:", 8)) { + if (kstrtoint(opt + 8, 0, &phyaddr)) + goto err; + } else if (!strncmp(opt, "buf_sz:", 7)) { + if (kstrtoint(opt + 7, 0, &buf_sz)) + goto err; + } else if (!strncmp(opt, "tc:", 3)) { + if (kstrtoint(opt + 3, 0, &tc)) + goto err; + } else if (!strncmp(opt, "watchdog:", 9)) { + if (kstrtoint(opt + 9, 0, &watchdog)) + goto err; + } else if (!strncmp(opt, "flow_ctrl:", 10)) { + if (kstrtoint(opt + 10, 0, &flow_ctrl)) + goto err; + } else if (!strncmp(opt, "pause:", 6)) { + if (kstrtoint(opt + 6, 0, &pause)) + goto err; + } else if (!strncmp(opt, "eee_timer:", 10)) { + if (kstrtoint(opt + 10, 0, &eee_timer)) + goto err; + } else if (!strncmp(opt, "chain_mode:", 11)) { + if (kstrtoint(opt + 11, 0, &chain_mode)) + goto err; + } + } + return 1; + +err: + pr_err("%s: ERROR broken module parameter conversion", __func__); + return 1; +} + +__setup("stmmaceth=", stmmac_cmdline_opt); +#endif /* MODULE */ + +int __cold stmmac_init(void) +{ +#ifdef CONFIG_DEBUG_FS 
+ /* Create debugfs main directory if it doesn't exist yet */ + if (!stmmac_fs_dir) + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + register_netdevice_notifier(&stmmac_notifier); +#endif + + return 0; +} + +void __cold stmmac_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + unregister_netdevice_notifier(&stmmac_notifier); + debugfs_remove_recursive(stmmac_fs_dir); +#endif +} diff --git a/devices/stmmac/stmmac_main-6.1-orig.c b/devices/stmmac/stmmac_main-6.1-orig.c new file mode 100644 index 00000000..e2d51014 --- /dev/null +++ b/devices/stmmac/stmmac_main-6.1-orig.c @@ -0,0 +1,7664 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. + ST Ethernet IPs are built around a Synopsys IP Core. + + Copyright(C) 2007-2011 STMicroelectronics Ltd + + + Author: Giuseppe Cavallaro + + Documentation available at: + http://www.stlinux.com + Support available at: + https://bugzilla.stlinux.com/ +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DEBUG_FS +#include +#include +#endif /* CONFIG_DEBUG_FS */ +#include +#include +#include +#include +#include +#include +#include "stmmac_ptp.h" +#include "stmmac.h" +#include "stmmac_xdp.h" +#include +#include +#include "dwmac1000.h" +#include "dwxgmac2.h" +#include "hwif.h" + +/* As long as the interface is active, we keep the timestamping counter enabled + * with fine resolution and binary rollover. This avoid non-monotonic behavior + * (clock jumps) when changing timestamping settings at runtime. 
+ */ +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSCTRLSSR) + +#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) +#define TSO_MAX_BUFF_SIZE (SZ_16K - 1) + +/* Module parameters */ +#define TX_TIMEO 5000 +static int watchdog = TX_TIMEO; +module_param(watchdog, int, 0644); +MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); + +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +static int phyaddr = -1; +module_param(phyaddr, int, 0444); +MODULE_PARM_DESC(phyaddr, "Physical device address"); + +#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) +#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) + +/* Limit to make sure XDP TX and slow path can coexist */ +#define STMMAC_XSK_TX_BUDGET_MAX 256 +#define STMMAC_TX_XSK_AVAIL 16 +#define STMMAC_RX_FILL_BATCH 16 + +#define STMMAC_XDP_PASS 0 +#define STMMAC_XDP_CONSUMED BIT(0) +#define STMMAC_XDP_TX BIT(1) +#define STMMAC_XDP_REDIRECT BIT(2) + +static int flow_ctrl = FLOW_AUTO; +module_param(flow_ctrl, int, 0644); +MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); + +static int pause = PAUSE_TIME; +module_param(pause, int, 0644); +MODULE_PARM_DESC(pause, "Flow Control Pause Time"); + +#define TC_DEFAULT 64 +static int tc = TC_DEFAULT; +module_param(tc, int, 0644); +MODULE_PARM_DESC(tc, "DMA threshold control value"); + +#define DEFAULT_BUFSIZE 1536 +static int buf_sz = DEFAULT_BUFSIZE; +module_param(buf_sz, int, 0644); +MODULE_PARM_DESC(buf_sz, "DMA buffer size"); + +#define STMMAC_RX_COPYBREAK 256 + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); + +#define STMMAC_DEFAULT_LPI_TIMER 1000 +static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; +module_param(eee_timer, int, 0644); +MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); +#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) + +/* By default the driver will use the ring mode to manage tx and rx descriptors, + * but allow user to force to use the chain instead of the ring + */ +static unsigned int chain_mode; +module_param(chain_mode, int, 0444); +MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); + +static irqreturn_t stmmac_interrupt(int irq, void *dev_id); +/* For MSI interrupts handling */ +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); +static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); +static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); +static void stmmac_reset_queues_param(struct stmmac_priv *priv); +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, + u32 rxmode, u32 chan); + +#ifdef CONFIG_DEBUG_FS +static const struct net_device_ops stmmac_netdev_ops; +static void stmmac_init_fs(struct net_device *dev); +static void stmmac_exit_fs(struct net_device *dev); +#endif + +#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) + +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) +{ + int ret = 0; + + if (enabled) { + ret = 
clk_prepare_enable(priv->plat->stmmac_clk); + if (ret) + return ret; + ret = clk_prepare_enable(priv->plat->pclk); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + return ret; + } + if (priv->plat->clks_config) { + ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + return ret; + } + } + } else { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + if (priv->plat->clks_config) + priv->plat->clks_config(priv->plat->bsp_priv, enabled); + } + + return ret; +} +EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); + +/** + * stmmac_verify_args - verify the driver parameters. + * Description: it checks the driver parameters and set a default in case of + * errors. + */ +static void stmmac_verify_args(void) +{ + if (unlikely(watchdog < 0)) + watchdog = TX_TIMEO; + if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) + buf_sz = DEFAULT_BUFSIZE; + if (unlikely(flow_ctrl > 1)) + flow_ctrl = FLOW_AUTO; + else if (likely(flow_ctrl < 0)) + flow_ctrl = FLOW_OFF; + if (unlikely((pause < 0) || (pause > 0xffff))) + pause = PAUSE_TIME; + if (eee_timer < 0) + eee_timer = STMMAC_DEFAULT_LPI_TIMER; +} + +static void __stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); + u32 queue; + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_disable(&ch->rxtx_napi); + continue; + } + + if (queue < rx_queues_cnt) + napi_disable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_disable(&ch->tx_napi); + } +} + +/** + * stmmac_disable_all_queues - Disable all queues + * @priv: driver private structure + */ +static void stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + struct stmmac_rx_queue *rx_q; + u32 queue; + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + for (queue = 0; queue < rx_queues_cnt; queue++) { + rx_q = &priv->dma_conf.rx_queue[queue]; + if (rx_q->xsk_pool) { + synchronize_rcu(); + break; + } + } + + __stmmac_disable_all_queues(priv); +} + +/** + * stmmac_enable_all_queues - Enable all queues + * @priv: driver private structure + */ +static void stmmac_enable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); + u32 queue; + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_enable(&ch->rxtx_napi); + continue; + } + + if (queue < rx_queues_cnt) + napi_enable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_enable(&ch->tx_napi); + } +} + +static void stmmac_service_event_schedule(struct stmmac_priv *priv) +{ + if (!test_bit(STMMAC_DOWN, &priv->state) && + !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) + queue_work(priv->wq, &priv->service_task); +} + +static void stmmac_global_err(struct stmmac_priv *priv) +{ + netif_carrier_off(priv->dev); + set_bit(STMMAC_RESET_REQUESTED, &priv->state); + stmmac_service_event_schedule(priv); +} + +/** + * stmmac_clk_csr_set - dynamically set the MDC clock + 
* @priv: driver private structure + * Description: this is to dynamically set the MDC clock according to the csr + * clock input. + * Note: + * If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be + * changed at run-time and it is fixed (as reported in the driver + * documentation). Viceversa the driver will try to set the MDC + * clock dynamically according to the actual clock input. + */ +static void stmmac_clk_csr_set(struct stmmac_priv *priv) +{ + u32 clk_rate; + + clk_rate = clk_get_rate(priv->plat->stmmac_clk); + + /* Platform provided default clk_csr would be assumed valid + * for all other cases except for the below mentioned ones. + * For values higher than the IEEE 802.3 specified frequency + * we can not estimate the proper divider as it is not known + * the frequency of clk_csr_i. So we do not change the default + * divider. + */ + if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { + if (clk_rate < CSR_F_35M) + priv->clk_csr = STMMAC_CSR_20_35M; + else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) + priv->clk_csr = STMMAC_CSR_35_60M; + else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) + priv->clk_csr = STMMAC_CSR_60_100M; + else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) + priv->clk_csr = STMMAC_CSR_100_150M; + else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) + priv->clk_csr = STMMAC_CSR_150_250M; + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) + priv->clk_csr = STMMAC_CSR_250_300M; + } + + if (priv->plat->has_sun8i) { + if (clk_rate > 160000000) + priv->clk_csr = 0x03; + else if (clk_rate > 80000000) + priv->clk_csr = 0x02; + else if (clk_rate > 40000000) + priv->clk_csr = 0x01; + else + priv->clk_csr = 0; + } + + if (priv->plat->has_xgmac) { + if (clk_rate > 400000000) + priv->clk_csr = 0x5; + else if (clk_rate > 350000000) + priv->clk_csr = 0x4; + else if (clk_rate > 300000000) + priv->clk_csr = 0x3; + else if (clk_rate > 250000000) + priv->clk_csr = 0x2; + else if (clk_rate > 150000000) + priv->clk_csr = 0x1; + else + priv->clk_csr = 0x0; + } +} + +static void print_pkt(unsigned char *buf, int len) +{ + pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); +} + +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + u32 avail; + + if (tx_q->dirty_tx > tx_q->cur_tx) + avail = tx_q->dirty_tx - tx_q->cur_tx - 1; + else + avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; + + return avail; +} + +/** + * stmmac_rx_dirty - Get RX queue dirty + * @priv: driver private structure + * @queue: RX queue index + */ +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + u32 dirty; + + if (rx_q->dirty_rx <= rx_q->cur_rx) + dirty = rx_q->cur_rx - rx_q->dirty_rx; + else + dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; + + return dirty; +} + +static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) +{ + int tx_lpi_timer; + + /* Clear/set the SW EEE timer flag based on LPI ET enablement */ + priv->eee_sw_timer_en = en ? 0 : 1; + tx_lpi_timer = en ? 
priv->tx_lpi_timer : 0; + stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); +} + +/** + * stmmac_enable_eee_mode - check and enter in LPI mode + * @priv: driver private structure + * Description: this function is to verify and enter in LPI mode in case of + * EEE. + */ +static int stmmac_enable_eee_mode(struct stmmac_priv *priv) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + /* check if all TX queues have the work finished */ + for (queue = 0; queue < tx_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + + if (tx_q->dirty_tx != tx_q->cur_tx) + return -EBUSY; /* still unfinished work */ + } + + /* Check and enter in LPI mode */ + if (!priv->tx_path_in_lpi_mode) + stmmac_set_eee_mode(priv, priv->hw, + priv->plat->en_tx_lpi_clockgating); + return 0; +} + +/** + * stmmac_disable_eee_mode - disable and exit from LPI mode + * @priv: driver private structure + * Description: this function is to exit and disable EEE in case of + * LPI state is true. This is called by the xmit. + */ +void stmmac_disable_eee_mode(struct stmmac_priv *priv) +{ + if (!priv->eee_sw_timer_en) { + stmmac_lpi_entry_timer_config(priv, 0); + return; + } + + stmmac_reset_eee_mode(priv, priv->hw); + del_timer_sync(&priv->eee_ctrl_timer); + priv->tx_path_in_lpi_mode = false; +} + +/** + * stmmac_eee_ctrl_timer - EEE TX SW timer. + * @t: timer_list struct containing private info + * Description: + * if there is no data transfer and if we are not in LPI state, + * then MAC Transmitter can be moved to LPI state. + */ +static void stmmac_eee_ctrl_timer(struct timer_list *t) +{ + struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); + + if (stmmac_enable_eee_mode(priv)) + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); +} + +/** + * stmmac_eee_init - init EEE + * @priv: driver private structure + * Description: + * if the GMAC supports the EEE (from the HW cap reg) and the phy device + * can also manage EEE, this function enable the LPI state and start related + * timer. + */ +bool stmmac_eee_init(struct stmmac_priv *priv) +{ + int eee_tw_timer = priv->eee_tw_timer; + + /* Using PCS we cannot dial with the phy registers at this stage + * so we do not support extra feature like EEE. + */ + if (priv->hw->pcs == STMMAC_PCS_TBI || + priv->hw->pcs == STMMAC_PCS_RTBI) + return false; + + /* Check if MAC core supports the EEE feature. 
*/ + if (!priv->dma_cap.eee) + return false; + + mutex_lock(&priv->lock); + + /* Check if it needs to be deactivated */ + if (!priv->eee_active) { + if (priv->eee_enabled) { + netdev_dbg(priv->dev, "disable EEE\n"); + stmmac_lpi_entry_timer_config(priv, 0); + del_timer_sync(&priv->eee_ctrl_timer); + stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); + if (priv->hw->xpcs) + xpcs_config_eee(priv->hw->xpcs, + priv->plat->mult_fact_100ns, + false); + } + mutex_unlock(&priv->lock); + return false; + } + + if (priv->eee_active && !priv->eee_enabled) { + timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); + stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, + eee_tw_timer); + if (priv->hw->xpcs) + xpcs_config_eee(priv->hw->xpcs, + priv->plat->mult_fact_100ns, + true); + } + + if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { + del_timer_sync(&priv->eee_ctrl_timer); + priv->tx_path_in_lpi_mode = false; + stmmac_lpi_entry_timer_config(priv, 1); + } else { + stmmac_lpi_entry_timer_config(priv, 0); + mod_timer(&priv->eee_ctrl_timer, + STMMAC_LPI_T(priv->tx_lpi_timer)); + } + + mutex_unlock(&priv->lock); + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); + return true; +} + +/* stmmac_get_tx_hwtstamp - get HW TX timestamps + * @priv: driver private structure + * @p : descriptor pointer + * @skb : the socket buffer + * Description : + * This function will read timestamp from the descriptor & pass it to stack. + * and also perform some sanity checks. + */ +static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, + struct dma_desc *p, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps shhwtstamp; + bool found = false; + u64 ns = 0; + + if (!priv->hwts_tx_en) + return; + + /* exit if skb doesn't support hw tstamp */ + if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) + return; + + /* check tx tstamp status */ + if (stmmac_get_tx_timestamp_status(priv, p)) { + stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); + found = true; + } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { + found = true; + } + + if (found) { + ns -= priv->plat->cdc_error_adj; + + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns); + + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); + /* pass tstamp to stack */ + skb_tstamp_tx(skb, &shhwtstamp); + } +} + +/* stmmac_get_rx_hwtstamp - get HW RX timestamps + * @priv: driver private structure + * @p : descriptor pointer + * @np : next descriptor pointer + * @skb : the socket buffer + * Description : + * This function will read received packet's timestamp from the descriptor + * and pass it to stack. It also perform some sanity checks. + */ +static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, + struct dma_desc *np, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamp = NULL; + struct dma_desc *desc = p; + u64 ns = 0; + + if (!priv->hwts_rx_en) + return; + /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ + if (priv->plat->has_gmac4 || priv->plat->has_xgmac) + desc = np; + + /* Check if timestamp is available */ + if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { + stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); + + ns -= priv->plat->cdc_error_adj; + + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); + shhwtstamp = skb_hwtstamps(skb); + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); + } else { + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); + } +} + +/** + * stmmac_hwtstamp_set - control hardware timestamping. + * @dev: device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * Description: + * This function configures the MAC to enable/disable both outgoing(TX) + * and incoming(RX) packets time stamping based on user input. + * Return Value: + * 0 on success and an appropriate -ve integer on failure. + */ +static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + u32 ptp_v2 = 0; + u32 tstamp_all = 0; + u32 ptp_over_ipv4_udp = 0; + u32 ptp_over_ipv6_udp = 0; + u32 ptp_over_ethernet = 0; + u32 snap_type_sel = 0; + u32 ts_master_en = 0; + u32 ts_event_en = 0; + + if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { + netdev_alert(priv->dev, "No support for HW time stamping\n"); + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return -EOPNOTSUPP; + } + + if (copy_from_user(&config, ifr->ifr_data, + sizeof(config))) + return -EFAULT; + + netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + if (priv->adv_ts) { + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* time stamp no incoming packet at all */ + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* 'xmac' hardware can support Sync, Pdelay_Req and + * Pdelay_resp by setting bit14 and bits17/16 to 01 + * This leaves Delay_Req timestamps out. 
+ * Enable all events *and* general purpose message + * timestamping + */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + /* PTP v1, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + /* PTP v1, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + /* PTP v2, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* PTP v2, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* PTP v2, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + /* PTP v2/802.AS1 any layer, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->synopsys_id < DWMAC_CORE_4_10) + ts_event_en = PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* PTP v2/802.AS1, any layer, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* PTP v2/802.AS1, any layer, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* time stamp any incoming packet */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_all = PTP_TCR_TSENALL; + break; + + default: + return -ERANGE; + } + } else { + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + default: + /* PTP v1, UDP, any 
kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + } + } + priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; + + priv->systime_flags = STMMAC_HWTS_ACTIVE; + + if (priv->hwts_tx_en || priv->hwts_rx_en) { + priv->systime_flags |= tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel; + } + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + + memcpy(&priv->tstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +/** + * stmmac_hwtstamp_get - read hardware timestamping. + * @dev: device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * Description: + * This function obtain the current hardware timestamping settings + * as requested. + */ +static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct hwtstamp_config *config = &priv->tstamp_config; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * stmmac_init_tstamp_counter - init hardware timestamping counter + * @priv: driver private structure + * @systime_flags: timestamping flags + * Description: + * Initialize hardware counter for packet timestamping. + * This is valid as long as the interface is open and not suspended. + * Will be rerun after resuming from suspend, case in which the timestamping + * flags updated by stmmac_hwtstamp_set() also need to be restored. + */ +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + struct timespec64 now; + u32 sec_inc = 0; + u64 temp = 0; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); + priv->systime_flags = systime_flags; + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); + +/** + * stmmac_init_ptp - init PTP + * @priv: driver private structure + * Description: this is to verify if the HW supports the PTPv1 or PTPv2. + * This is done by looking at the HW cap. register. + * This function also registers the ptp driver. 
+ */ +static int stmmac_init_ptp(struct stmmac_priv *priv) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + int ret; + + if (priv->plat->ptp_clk_freq_config) + priv->plat->ptp_clk_freq_config(priv); + + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + if (ret) + return ret; + + priv->adv_ts = 0; + /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ + if (xmac && priv->dma_cap.atime_stamp) + priv->adv_ts = 1; + /* Dwmac 3.x core with extend_desc can support adv_ts */ + else if (priv->extend_desc && priv->dma_cap.atime_stamp) + priv->adv_ts = 1; + + if (priv->dma_cap.time_stamp) + netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); + + if (priv->adv_ts) + netdev_info(priv->dev, + "IEEE 1588-2008 Advanced Timestamp supported\n"); + + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return 0; +} + +static void stmmac_release_ptp(struct stmmac_priv *priv) +{ + clk_disable_unprepare(priv->plat->clk_ptp_ref); + stmmac_ptp_unregister(priv); +} + +/** + * stmmac_mac_flow_ctrl - Configure flow control in all queues + * @priv: driver private structure + * @duplex: duplex passed to the next function + * Description: It is used for configuring the flow control in all queues + */ +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + + stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, + priv->pause, tx_cnt); +} + +static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + if (!priv->hw->xpcs) + return NULL; + + return &priv->hw->xpcs->pcs; +} + +static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + /* Nothing to do, xpcs_config() handles everything */ +} + +static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (is_up && *hs_enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, + MPACKET_VERIFY); + } else { + *lo_state = FPE_STATE_OFF; + *lp_state = FPE_STATE_OFF; + } +} + +static void stmmac_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + stmmac_mac_set(priv, priv->ioaddr, false); + priv->eee_active = false; + priv->tx_lpi_enabled = false; + priv->eee_enabled = stmmac_eee_init(priv); + stmmac_set_eee_pls(priv, priv->hw, false); + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, false); +} + +static void stmmac_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + u32 old_ctrl, ctrl; + + old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); + ctrl = old_ctrl & ~priv->hw->link.speed_mask; + + if (interface == PHY_INTERFACE_MODE_USXGMII) { + switch (speed) { + case SPEED_10000: + ctrl |= priv->hw->link.xgmii.speed10000; + break; + case SPEED_5000: + ctrl |= priv->hw->link.xgmii.speed5000; + break; + case SPEED_2500: + ctrl |= priv->hw->link.xgmii.speed2500; + break; + default: + return; + } + } else if (interface 
== PHY_INTERFACE_MODE_XLGMII) { + switch (speed) { + case SPEED_100000: + ctrl |= priv->hw->link.xlgmii.speed100000; + break; + case SPEED_50000: + ctrl |= priv->hw->link.xlgmii.speed50000; + break; + case SPEED_40000: + ctrl |= priv->hw->link.xlgmii.speed40000; + break; + case SPEED_25000: + ctrl |= priv->hw->link.xlgmii.speed25000; + break; + case SPEED_10000: + ctrl |= priv->hw->link.xgmii.speed10000; + break; + case SPEED_2500: + ctrl |= priv->hw->link.speed2500; + break; + case SPEED_1000: + ctrl |= priv->hw->link.speed1000; + break; + default: + return; + } + } else { + switch (speed) { + case SPEED_2500: + ctrl |= priv->hw->link.speed2500; + break; + case SPEED_1000: + ctrl |= priv->hw->link.speed1000; + break; + case SPEED_100: + ctrl |= priv->hw->link.speed100; + break; + case SPEED_10: + ctrl |= priv->hw->link.speed10; + break; + default: + return; + } + } + + priv->speed = speed; + + if (priv->plat->fix_mac_speed) + priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); + + if (!duplex) + ctrl &= ~priv->hw->link.duplex; + else + ctrl |= priv->hw->link.duplex; + + /* Flow Control operation */ + if (rx_pause && tx_pause) + priv->flow_ctrl = FLOW_AUTO; + else if (rx_pause && !tx_pause) + priv->flow_ctrl = FLOW_RX; + else if (!rx_pause && tx_pause) + priv->flow_ctrl = FLOW_TX; + else + priv->flow_ctrl = FLOW_OFF; + + stmmac_mac_flow_ctrl(priv, duplex); + + if (ctrl != old_ctrl) + writel(ctrl, priv->ioaddr + MAC_CTRL_REG); + + stmmac_mac_set(priv, priv->ioaddr, true); + if (phy && priv->dma_cap.eee) { + priv->eee_active = + phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0; + priv->eee_enabled = stmmac_eee_init(priv); + priv->tx_lpi_enabled = priv->eee_enabled; + stmmac_set_eee_pls(priv, priv->hw, true); + } + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, true); +} + +static const struct phylink_mac_ops stmmac_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = stmmac_mac_select_pcs, + .mac_config = stmmac_mac_config, + .mac_link_down = stmmac_mac_link_down, + .mac_link_up = stmmac_mac_link_up, +}; + +/** + * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported + * @priv: driver private structure + * Description: this is to verify if the HW supports the PCS. + * Physical Coding Sublayer (PCS) interface that can be used when the MAC is + * configured for the TBI, RTBI, or SGMII PHY interface. + */ +static void stmmac_check_pcs_mode(struct stmmac_priv *priv) +{ + int interface = priv->plat->interface; + + if (priv->dma_cap.pcs) { + if ((interface == PHY_INTERFACE_MODE_RGMII) || + (interface == PHY_INTERFACE_MODE_RGMII_ID) || + (interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { + netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_RGMII; + } else if (interface == PHY_INTERFACE_MODE_SGMII) { + netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_SGMII; + } + } +} + +/** + * stmmac_init_phy - PHY initialization + * @dev: net device structure + * Description: it initializes the driver's PHY state, and attaches the PHY + * to the mac driver. 
+ * Return value: + * 0 on success + */ +static int stmmac_init_phy(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct fwnode_handle *phy_fwnode; + struct fwnode_handle *fwnode; + int ret; + + if (!phylink_expects_phy(priv->phylink)) + return 0; + + fwnode = of_fwnode_handle(priv->plat->phylink_node); + if (!fwnode) + fwnode = dev_fwnode(priv->device); + + if (fwnode) + phy_fwnode = fwnode_get_phy_node(fwnode); + else + phy_fwnode = NULL; + + /* Some DT bindings do not set-up the PHY handle. Let's try to + * manually parse it + */ + if (!phy_fwnode || IS_ERR(phy_fwnode)) { + int addr = priv->plat->phy_addr; + struct phy_device *phydev; + + if (addr < 0) { + netdev_err(priv->dev, "no phy found\n"); + return -ENODEV; + } + + phydev = mdiobus_get_phy(priv->mii, addr); + if (!phydev) { + netdev_err(priv->dev, "no phy at addr %d\n", addr); + return -ENODEV; + } + + ret = phylink_connect_phy(priv->phylink, phydev); + } else { + fwnode_handle_put(phy_fwnode); + ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); + } + + if (!priv->plat->pmt) { + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + + phylink_ethtool_get_wol(priv->phylink, &wol); + device_set_wakeup_capable(priv->device, !!wol.supported); + device_set_wakeup_enable(priv->device, !!wol.wolopts); + } + + return ret; +} + +static int stmmac_phy_setup(struct stmmac_priv *priv) +{ + struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; + struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); + int max_speed = priv->plat->max_speed; + int mode = priv->plat->phy_interface; + struct phylink *phylink; + + priv->phylink_config.dev = &priv->dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + if (priv->plat->mdio_bus_data) + priv->phylink_config.ovr_an_inband = + mdio_bus_data->xpcs_an_inband; + + if (!fwnode) + fwnode = dev_fwnode(priv->device); + + /* Set the platform/firmware specified interface mode */ + __set_bit(mode, priv->phylink_config.supported_interfaces); + + /* If we have an xpcs, it defines which PHY interfaces are supported. 
*/ + if (priv->hw->xpcs) + xpcs_get_interfaces(priv->hw->xpcs, + priv->phylink_config.supported_interfaces); + + priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100; + + if (!max_speed || max_speed >= 1000) + priv->phylink_config.mac_capabilities |= MAC_1000; + + if (priv->plat->has_gmac4) { + if (!max_speed || max_speed >= 2500) + priv->phylink_config.mac_capabilities |= MAC_2500FD; + } else if (priv->plat->has_xgmac) { + if (!max_speed || max_speed >= 2500) + priv->phylink_config.mac_capabilities |= MAC_2500FD; + if (!max_speed || max_speed >= 5000) + priv->phylink_config.mac_capabilities |= MAC_5000FD; + if (!max_speed || max_speed >= 10000) + priv->phylink_config.mac_capabilities |= MAC_10000FD; + if (!max_speed || max_speed >= 25000) + priv->phylink_config.mac_capabilities |= MAC_25000FD; + if (!max_speed || max_speed >= 40000) + priv->phylink_config.mac_capabilities |= MAC_40000FD; + if (!max_speed || max_speed >= 50000) + priv->phylink_config.mac_capabilities |= MAC_50000FD; + if (!max_speed || max_speed >= 100000) + priv->phylink_config.mac_capabilities |= MAC_100000FD; + } + + /* Half-Duplex can only work with single queue */ + if (priv->plat->tx_queues_to_use > 1) + priv->phylink_config.mac_capabilities &= + ~(MAC_10HD | MAC_100HD | MAC_1000HD); + priv->phylink_config.mac_managed_pm = true; + + phylink = phylink_create(&priv->phylink_config, fwnode, + mode, &stmmac_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + priv->phylink = phylink; + return 0; +} + +static void stmmac_display_rx_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_cnt = priv->plat->rx_queues_to_use; + unsigned int desc_size; + void *head_rx; + u32 queue; + + /* Display RX rings */ + for (queue = 0; queue < rx_cnt; queue++) { + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + pr_info("\tRX Queue %u rings\n", queue); + + if (priv->extend_desc) { + head_rx = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + head_rx = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + /* Display RX ring */ + stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } +} + +static void stmmac_display_tx_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + unsigned int desc_size; + void *head_tx; + u32 queue; + + /* Display TX rings */ + for (queue = 0; queue < tx_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + + pr_info("\tTX Queue %d rings\n", queue); + + if (priv->extend_desc) { + head_tx = (void *)tx_q->dma_etx; + desc_size = sizeof(struct dma_extended_desc); + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { + head_tx = (void *)tx_q->dma_entx; + desc_size = sizeof(struct dma_edesc); + } else { + head_tx = (void *)tx_q->dma_tx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, + tx_q->dma_tx_phy, desc_size); + } +} + +static void stmmac_display_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + /* Display RX ring */ + stmmac_display_rx_rings(priv, dma_conf); + + /* Display TX ring */ + stmmac_display_tx_rings(priv, dma_conf); +} + +static int stmmac_set_bfsize(int mtu, int bufsize) +{ + int ret = bufsize; + + if (mtu >= BUF_SIZE_8KiB) + ret = BUF_SIZE_16KiB; + else if (mtu >= BUF_SIZE_4KiB) + ret = BUF_SIZE_8KiB; + else if (mtu >= BUF_SIZE_2KiB) + ret = 
BUF_SIZE_4KiB; + else if (mtu > DEFAULT_BUFSIZE) + ret = BUF_SIZE_2KiB; + else + ret = DEFAULT_BUFSIZE; + + return ret; +} + +/** + * stmmac_clear_rx_descriptors - clear RX descriptors + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + * Description: this function is called to clear the RX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + /* Clear the RX descriptors */ + for (i = 0; i < dma_conf->dma_rx_size; i++) + if (priv->extend_desc) + stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, + priv->use_riwt, priv->mode, + (i == dma_conf->dma_rx_size - 1), + dma_conf->dma_buf_sz); + else + stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], + priv->use_riwt, priv->mode, + (i == dma_conf->dma_rx_size - 1), + dma_conf->dma_buf_sz); +} + +/** + * stmmac_clear_tx_descriptors - clear tx descriptors + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index. + * Description: this function is called to clear the TX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + int i; + + /* Clear the TX descriptors */ + for (i = 0; i < dma_conf->dma_tx_size; i++) { + int last = (i == (dma_conf->dma_tx_size - 1)); + struct dma_desc *p; + + if (priv->extend_desc) + p = &tx_q->dma_etx[i].basic; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &tx_q->dma_entx[i].basic; + else + p = &tx_q->dma_tx[i]; + + stmmac_init_tx_desc(priv, p, priv->mode, last); + } +} + +/** + * stmmac_clear_descriptors - clear descriptors + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * Description: this function is called to clear the TX and RX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_queue_cnt = priv->plat->rx_queues_to_use; + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + /* Clear the RX descriptors */ + for (queue = 0; queue < rx_queue_cnt; queue++) + stmmac_clear_rx_descriptors(priv, dma_conf, queue); + + /* Clear the TX descriptors */ + for (queue = 0; queue < tx_queue_cnt; queue++) + stmmac_clear_tx_descriptors(priv, dma_conf, queue); +} + +/** + * stmmac_init_rx_buffers - init the RX descriptor buffer. + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @p: descriptor pointer + * @i: descriptor index + * @flags: gfp flag + * @queue: RX queue index + * Description: this function is called to allocate a receive buffer, perform + * the DMA mapping and init the descriptor. 
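+ * Return value: 0 on success, -ENOMEM if a buffer page cannot be allocated.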
+ */
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+				   struct stmmac_dma_conf *dma_conf,
+				   struct dma_desc *p,
+				   int i, gfp_t flags, u32 queue)
+{
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+	if (priv->dma_cap.host_dma_width <= 32)
+		gfp |= GFP_DMA32;
+
+	if (!buf->page) {
+		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+		if (!buf->page)
+			return -ENOMEM;
+		buf->page_offset = stmmac_rx_offset(priv);
+	}
+
+	if (priv->sph && !buf->sec_page) {
+		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+		if (!buf->sec_page)
+			return -ENOMEM;
+
+		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+	} else {
+		buf->sec_page = NULL;
+		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+	}
+
+	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
+	stmmac_set_desc_addr(priv, p, buf->addr);
+	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
+		stmmac_init_desc3(priv, p);
+
+	return 0;
+}
+
+/**
+ * stmmac_free_rx_buffer - free RX dma buffers
+ * @priv: private structure
+ * @rx_q: RX queue
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+				  struct stmmac_rx_queue *rx_q,
+				  int i)
+{
+	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+	if (buf->page)
+		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
+	buf->page = NULL;
+
+	if (buf->sec_page)
+		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
+	buf->sec_page = NULL;
+}
+
+/**
+ * stmmac_free_tx_buffer - free TX dma buffers
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
+ * @i: buffer index.
+ */ +static void stmmac_free_tx_buffer(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, int i) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + + if (tx_q->tx_skbuff_dma[i].buf && + tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { + if (tx_q->tx_skbuff_dma[i].map_as_page) + dma_unmap_page(priv->device, + tx_q->tx_skbuff_dma[i].buf, + tx_q->tx_skbuff_dma[i].len, + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + tx_q->tx_skbuff_dma[i].buf, + tx_q->tx_skbuff_dma[i].len, + DMA_TO_DEVICE); + } + + if (tx_q->xdpf[i] && + (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { + xdp_return_frame(tx_q->xdpf[i]); + tx_q->xdpf[i] = NULL; + } + + if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (tx_q->tx_skbuff[i] && + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { + dev_kfree_skb_any(tx_q->tx_skbuff[i]); + tx_q->tx_skbuff[i] = NULL; + } + + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; +} + +/** + * dma_free_rx_skbufs - free RX dma buffers + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + */ +static void dma_free_rx_skbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) + stmmac_free_rx_buffer(priv, rx_q, i); +} + +static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, gfp_t flags) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct dma_desc *p; + int ret; + + if (priv->extend_desc) + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; + + ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, + queue); + if (ret) + return ret; + + rx_q->buf_alloc_num++; + } + + return 0; +} + +/** + * dma_free_rx_xskbufs - free RX dma buffers from XSK pool + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + */ +static void dma_free_rx_xskbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + + if (!buf->xdp) + continue; + + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + } +} + +static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int i; + + for (i = 0; i < dma_conf->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf; + dma_addr_t dma_addr; + struct dma_desc *p; + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + i); + else + p = rx_q->dma_rx + i; + + buf = &rx_q->buf_pool[i]; + + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) + return -ENOMEM; + + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, p, dma_addr); + rx_q->buf_alloc_num++; + } + + return 0; +} + +static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) +{ + if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) + return NULL; + + return xsk_get_pool_from_qid(priv->dev, queue); +} + +/** + * __init_dma_rx_desc_rings 
- init the RX descriptor ring (per queue) + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + * @flags: gfp flag. + * Description: this function initializes the DMA RX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. + */ +static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, gfp_t flags) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + int ret; + + netif_dbg(priv, probe, priv->dev, + "(%s) dma_rx_phy=0x%08x\n", __func__, + (u32)rx_q->dma_rx_phy); + + stmmac_clear_rx_descriptors(priv, dma_conf, queue); + + xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); + + rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); + + if (rx_q->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + netdev_info(priv->dev, + "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", + rx_q->queue_index); + xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_PAGE_POOL, + rx_q->page_pool)); + netdev_info(priv->dev, + "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", + rx_q->queue_index); + } + + if (rx_q->xsk_pool) { + /* RX XDP ZC buffer pool may not be populated, e.g. + * xdpsock TX-only. + */ + stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); + } else { + ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); + if (ret < 0) + return -ENOMEM; + } + + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, rx_q->dma_erx, + rx_q->dma_rx_phy, + dma_conf->dma_rx_size, 1); + else + stmmac_mode_init(priv, rx_q->dma_rx, + rx_q->dma_rx_phy, + dma_conf->dma_rx_size, 0); + } + + return 0; +} + +static int init_dma_rx_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf, + gfp_t flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + int queue; + int ret; + + /* RX INITIALIZATION */ + netif_dbg(priv, probe, priv->dev, + "SKB addresses:\nskb\t\tskb data\tdma data\n"); + + for (queue = 0; queue < rx_count; queue++) { + ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); + if (ret) + goto err_init_rx_buffers; + } + + return 0; + +err_init_rx_buffers: + while (queue >= 0) { + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, dma_conf, queue); + else + dma_free_rx_skbufs(priv, dma_conf, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; + + queue--; + } + + return ret; +} + +/** + * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) + * @priv: driver private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + * Description: this function initializes the DMA TX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. 
+ */ +static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + int i; + + netif_dbg(priv, probe, priv->dev, + "(%s) dma_tx_phy=0x%08x\n", __func__, + (u32)tx_q->dma_tx_phy); + + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, + dma_conf->dma_tx_size, 1); + else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, + dma_conf->dma_tx_size, 0); + } + + tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); + + for (i = 0; i < dma_conf->dma_tx_size; i++) { + struct dma_desc *p; + + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &((tx_q->dma_entx + i)->basic); + else + p = tx_q->dma_tx + i; + + stmmac_clear_desc(priv, p); + + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; + tx_q->tx_skbuff_dma[i].len = 0; + tx_q->tx_skbuff_dma[i].last_segment = false; + tx_q->tx_skbuff[i] = NULL; + } + + return 0; +} + +static int init_dma_tx_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_queue_cnt; + u32 queue; + + tx_queue_cnt = priv->plat->tx_queues_to_use; + + for (queue = 0; queue < tx_queue_cnt; queue++) + __init_dma_tx_desc_rings(priv, dma_conf, queue); + + return 0; +} + +/** + * init_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * @dma_conf: structure to take the dma data + * @flags: gfp flag. + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. 
+ */ +static int init_dma_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf, + gfp_t flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + ret = init_dma_rx_desc_rings(dev, dma_conf, flags); + if (ret) + return ret; + + ret = init_dma_tx_desc_rings(dev, dma_conf); + + stmmac_clear_descriptors(priv, dma_conf); + + if (netif_msg_hw(priv)) + stmmac_display_rings(priv, dma_conf); + + return ret; +} + +/** + * dma_free_tx_skbufs - free TX dma buffers + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + */ +static void dma_free_tx_skbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + int i; + + tx_q->xsk_frames_done = 0; + + for (i = 0; i < dma_conf->dma_tx_size; i++) + stmmac_free_tx_buffer(priv, dma_conf, queue, i); + + if (tx_q->xsk_pool && tx_q->xsk_frames_done) { + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + tx_q->xsk_frames_done = 0; + tx_q->xsk_pool = NULL; + } +} + +/** + * stmmac_free_tx_skbufs - free TX skb buffers + * @priv: private structure + */ +static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) +{ + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < tx_queue_cnt; queue++) + dma_free_tx_skbufs(priv, &priv->dma_conf, queue); +} + +/** + * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + */ +static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + + /* Release the DMA RX socket buffers */ + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, dma_conf, queue); + else + dma_free_rx_skbufs(priv, dma_conf, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; + + /* Free DMA regions of consistent memory previously allocated */ + if (!priv->extend_desc) + dma_free_coherent(priv->device, dma_conf->dma_rx_size * + sizeof(struct dma_desc), + rx_q->dma_rx, rx_q->dma_rx_phy); + else + dma_free_coherent(priv->device, dma_conf->dma_rx_size * + sizeof(struct dma_extended_desc), + rx_q->dma_erx, rx_q->dma_rx_phy); + + if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) + xdp_rxq_info_unreg(&rx_q->xdp_rxq); + + kfree(rx_q->buf_pool); + if (rx_q->page_pool) + page_pool_destroy(rx_q->page_pool); +} + +static void free_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + + /* Free RX queue resources */ + for (queue = 0; queue < rx_count; queue++) + __free_dma_rx_desc_resources(priv, dma_conf, queue); +} + +/** + * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + */ +static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + size_t size; + void *addr; + + /* Release the DMA TX socket buffers */ + dma_free_tx_skbufs(priv, dma_conf, queue); + + if (priv->extend_desc) { + size = sizeof(struct dma_extended_desc); + addr = tx_q->dma_etx; + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { + size = sizeof(struct dma_edesc); + addr = tx_q->dma_entx; + } else { + size = sizeof(struct dma_desc); + 
addr = tx_q->dma_tx; + } + + size *= dma_conf->dma_tx_size; + + dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); + + kfree(tx_q->tx_skbuff_dma); + kfree(tx_q->tx_skbuff); +} + +static void free_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + + /* Free TX queue resources */ + for (queue = 0; queue < tx_count; queue++) + __free_dma_tx_desc_resources(priv, dma_conf, queue); +} + +/** + * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). + * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: RX queue index + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + bool xdp_prog = stmmac_xdp_is_enabled(priv); + struct page_pool_params pp_params = { 0 }; + unsigned int num_pages; + unsigned int napi_id; + int ret; + + rx_q->queue_index = queue; + rx_q->priv_data = priv; + + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = dma_conf->dma_rx_size; + num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); + pp_params.nid = dev_to_node(priv->device); + pp_params.dev = priv->device; + pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + pp_params.offset = stmmac_rx_offset(priv); + pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); + + rx_q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_q->page_pool)) { + ret = PTR_ERR(rx_q->page_pool); + rx_q->page_pool = NULL; + return ret; + } + + rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, + sizeof(*rx_q->buf_pool), + GFP_KERNEL); + if (!rx_q->buf_pool) + return -ENOMEM; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_alloc_coherent(priv->device, + dma_conf->dma_rx_size * + sizeof(struct dma_extended_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_erx) + return -ENOMEM; + + } else { + rx_q->dma_rx = dma_alloc_coherent(priv->device, + dma_conf->dma_rx_size * + sizeof(struct dma_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_rx) + return -ENOMEM; + } + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) + napi_id = ch->rxtx_napi.napi_id; + else + napi_id = ch->rx_napi.napi_id; + + ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, + rx_q->queue_index, + napi_id); + if (ret) { + netdev_err(priv->dev, "Failed to register xdp rxq info\n"); + return -EINVAL; + } + + return 0; +} + +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int ret; + + /* RX queues buffers and DMA */ + for (queue = 0; queue < rx_count; queue++) { + ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); + if (ret) + goto err_dma; + } + + return 0; + +err_dma: + free_dma_rx_desc_resources(priv, dma_conf); + + return ret; +} + +/** + * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 
+ * @priv: private structure + * @dma_conf: structure to take the dma data + * @queue: TX queue index + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) +{ + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; + size_t size; + void *addr; + + tx_q->queue_index = queue; + tx_q->priv_data = priv; + + tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, + sizeof(*tx_q->tx_skbuff_dma), + GFP_KERNEL); + if (!tx_q->tx_skbuff_dma) + return -ENOMEM; + + tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!tx_q->tx_skbuff) + return -ENOMEM; + + if (priv->extend_desc) + size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + size = sizeof(struct dma_edesc); + else + size = sizeof(struct dma_desc); + + size *= dma_conf->dma_tx_size; + + addr = dma_alloc_coherent(priv->device, size, + &tx_q->dma_tx_phy, GFP_KERNEL); + if (!addr) + return -ENOMEM; + + if (priv->extend_desc) + tx_q->dma_etx = addr; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_q->dma_entx = addr; + else + tx_q->dma_tx = addr; + + return 0; +} + +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + int ret; + + /* TX queues buffers and DMA */ + for (queue = 0; queue < tx_count; queue++) { + ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); + if (ret) + goto err_dma; + } + + return 0; + +err_dma: + free_dma_tx_desc_resources(priv, dma_conf); + return ret; +} + +/** + * alloc_dma_desc_resources - alloc TX/RX resources. + * @priv: private structure + * @dma_conf: structure to take the dma data + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +static int alloc_dma_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + /* RX Allocation */ + int ret = alloc_dma_rx_desc_resources(priv, dma_conf); + + if (ret) + return ret; + + ret = alloc_dma_tx_desc_resources(priv, dma_conf); + + return ret; +} + +/** + * free_dma_desc_resources - free dma desc resources + * @priv: private structure + * @dma_conf: structure to take the dma data + */ +static void free_dma_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) +{ + /* Release the DMA TX socket buffers */ + free_dma_tx_desc_resources(priv, dma_conf); + + /* Release the DMA RX socket buffers later + * to ensure all pending XDP_TX buffers are returned. 
+ */ + free_dma_rx_desc_resources(priv, dma_conf); +} + +/** + * stmmac_mac_enable_rx_queues - Enable MAC rx queues + * @priv: driver private structure + * Description: It is used for enabling the rx queues in the MAC + */ +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + int queue; + u8 mode; + + for (queue = 0; queue < rx_queues_count; queue++) { + mode = priv->plat->rx_queues_cfg[queue].mode_to_use; + stmmac_rx_queue_enable(priv, priv->hw, mode, queue); + } +} + +/** + * stmmac_start_rx_dma - start RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This starts a RX DMA channel + */ +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); + stmmac_start_rx(priv, priv->ioaddr, chan); +} + +/** + * stmmac_start_tx_dma - start TX DMA channel + * @priv: driver private structure + * @chan: TX channel index + * Description: + * This starts a TX DMA channel + */ +static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); + stmmac_start_tx(priv, priv->ioaddr, chan); +} + +/** + * stmmac_stop_rx_dma - stop RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This stops a RX DMA channel + */ +static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); + stmmac_stop_rx(priv, priv->ioaddr, chan); +} + +/** + * stmmac_stop_tx_dma - stop TX DMA channel + * @priv: driver private structure + * @chan: TX channel index + * Description: + * This stops a TX DMA channel + */ +static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); + stmmac_stop_tx(priv, priv->ioaddr, chan); +} + +static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); + u32 chan; + + for (chan = 0; chan < dma_csr_ch; chan++) { + struct stmmac_channel *ch = &priv->channel[chan]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } +} + +/** + * stmmac_start_all_dma - start all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This starts all the RX and TX DMA channels + */ +static void stmmac_start_all_dma(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; + + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_start_rx_dma(priv, chan); + + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_start_tx_dma(priv, chan); +} + +/** + * stmmac_stop_all_dma - stop all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This stops the RX and TX DMA channels + */ +static void stmmac_stop_all_dma(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; + + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_stop_rx_dma(priv, chan); + + for (chan = 0; chan < 
tx_channels_count; chan++) + stmmac_stop_tx_dma(priv, chan); +} + +/** + * stmmac_dma_operation_mode - HW DMA operation mode + * @priv: driver private structure + * Description: it is used for configuring the DMA operation mode register in + * order to program the tx/rx DMA thresholds or Store-And-Forward mode. + */ +static void stmmac_dma_operation_mode(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + int rxfifosz = priv->plat->rx_fifo_size; + int txfifosz = priv->plat->tx_fifo_size; + u32 txmode = 0; + u32 rxmode = 0; + u32 chan = 0; + u8 qmode = 0; + + if (rxfifosz == 0) + rxfifosz = priv->dma_cap.rx_fifo_size; + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + /* Adjust for real per queue fifo size */ + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; + + if (priv->plat->force_thresh_dma_mode) { + txmode = tc; + rxmode = tc; + } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { + /* + * In case of GMAC, SF mode can be enabled + * to perform the TX COE in HW. This depends on: + * 1) TX COE if actually supported + * 2) There is no bugged Jumbo frame support + * that needs to not insert csum in the TDES. + */ + txmode = SF_DMA_MODE; + rxmode = SF_DMA_MODE; + priv->xstats.threshold = SF_DMA_MODE; + } else { + txmode = tc; + rxmode = SF_DMA_MODE; + } + + /* configure all channels */ + for (chan = 0; chan < rx_channels_count; chan++) { + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; + u32 buf_size; + + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, + rxfifosz, qmode); + + if (rx_q->xsk_pool) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + chan); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_conf.dma_buf_sz, + chan); + } + } + + for (chan = 0; chan < tx_channels_count; chan++) { + qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; + + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, + txfifosz, qmode); + } +} + +static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct xsk_buff_pool *pool = tx_q->xsk_pool; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc = NULL; + struct xdp_desc xdp_desc; + bool work_done = true; + + /* Avoids TX time-out as we are sharing with slow path */ + txq_trans_cond_update(nq); + + budget = min(budget, stmmac_tx_avail(priv, queue)); + + while (budget-- > 0) { + dma_addr_t dma_addr; + bool set_ic; + + /* We are sharing with slow path and stop XSK TX desc submission when + * available TX ring is less than threshold. 
+ */ + if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || + !netif_carrier_ok(priv->dev)) { + work_done = false; + break; + } + + if (!xsk_tx_peek_desc(pool, &xdp_desc)) + break; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = &tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; + + /* To return XDP buffer to XSK pool, we simple call + * xsk_tx_completed(), so we don't need to fill up + * 'buf' and 'xdpf'. + */ + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->xdpf[entry] = NULL; + + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + tx_q->tx_count_frames++; + + if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, + true, priv->mode, true, true, + xdp_desc.len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + entry = tx_q->cur_tx; + } + + if (tx_desc) { + stmmac_flush_tx_descriptors(priv, queue); + xsk_tx_release(pool); + } + + /* Return true if all of the 3 conditions are met + * a) TX Budget is still available + * b) work_done = true when XSK TX desc peek is empty (no more + * pending XSK TX for transmission) + */ + return !!budget && work_done; +} + +static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) +{ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { + tc += 64; + + if (priv->plat->force_thresh_dma_mode) + stmmac_set_dma_operation_mode(priv, tc, tc, chan); + else + stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, + chan); + + priv->xstats.threshold = tc; + } +} + +/** + * stmmac_tx_clean - to manage the transmission completion + * @priv: driver private structure + * @budget: napi budget limiting this functions packet handling + * @queue: TX queue index + * Description: it reclaims the transmit resources after transmission completes. 
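+ * Return value: the maximum of the reclaimed descriptor count and the XSK
+ * TX work done, so the NAPI caller keeps polling while work remains.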
+ */ +static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned int entry, xmits = 0, count = 0; + + __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); + + priv->xstats.tx_clean++; + + tx_q->xsk_frames_done = 0; + + entry = tx_q->dirty_tx; + + /* Try to clean all TX complete frame in 1 shot */ + while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { + struct xdp_frame *xdpf; + struct sk_buff *skb; + struct dma_desc *p; + int status; + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { + xdpf = tx_q->xdpf[entry]; + skb = NULL; + } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + xdpf = NULL; + skb = tx_q->tx_skbuff[entry]; + } else { + xdpf = NULL; + skb = NULL; + } + + if (priv->extend_desc) + p = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &tx_q->dma_entx[entry].basic; + else + p = tx_q->dma_tx + entry; + + status = stmmac_tx_status(priv, &priv->dev->stats, + &priv->xstats, p, priv->ioaddr); + /* Check if the descriptor is owned by the DMA */ + if (unlikely(status & tx_dma_own)) + break; + + count++; + + /* Make sure descriptor fields are read after reading + * the own bit. + */ + dma_rmb(); + + /* Just consider the last segment and ...*/ + if (likely(!(status & tx_not_ls))) { + /* ... verify the status error condition */ + if (unlikely(status & tx_err)) { + priv->dev->stats.tx_errors++; + if (unlikely(status & tx_err_bump_tc)) + stmmac_bump_dma_threshold(priv, queue); + } else { + priv->dev->stats.tx_packets++; + priv->xstats.tx_pkt_n++; + priv->xstats.txq_stats[queue].tx_pkt_n++; + } + if (skb) + stmmac_get_tx_hwtstamp(priv, p, skb); + } + + if (likely(tx_q->tx_skbuff_dma[entry].buf && + tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { + if (tx_q->tx_skbuff_dma[entry].map_as_page) + dma_unmap_page(priv->device, + tx_q->tx_skbuff_dma[entry].buf, + tx_q->tx_skbuff_dma[entry].len, + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + tx_q->tx_skbuff_dma[entry].buf, + tx_q->tx_skbuff_dma[entry].len, + DMA_TO_DEVICE); + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->tx_skbuff_dma[entry].len = 0; + tx_q->tx_skbuff_dma[entry].map_as_page = false; + } + + stmmac_clean_desc3(priv, tx_q, p); + + tx_q->tx_skbuff_dma[entry].last_segment = false; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { + xdp_return_frame(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + if (likely(skb)) { + pkts_compl++; + bytes_compl += skb->len; + dev_consume_skb_any(skb); + tx_q->tx_skbuff[entry] = NULL; + } + } + + stmmac_release_tx_desc(priv, p, priv->mode); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + } + tx_q->dirty_tx = entry; + + netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), + pkts_compl, bytes_compl); + + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, + queue))) && + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { + + 
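+ /* The queue is stopped and enough descriptors have been reclaimed to get back above the restart threshold, so wake it up. */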
netif_dbg(priv, tx_done, priv->dev, + "%s: restart transmit\n", __func__); + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + if (tx_q->xsk_pool) { + bool work_done; + + if (tx_q->xsk_frames_done) + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + + if (xsk_uses_need_wakeup(tx_q->xsk_pool)) + xsk_set_tx_need_wakeup(tx_q->xsk_pool); + + /* For XSK TX, we try to send as many as possible. + * If XSK work done (XSK TX desc empty and budget still + * available), return "budget - 1" to reenable TX IRQ. + * Else, return "budget" to make NAPI continue polling. + */ + work_done = stmmac_xdp_xmit_zc(priv, queue, + STMMAC_XSK_TX_BUDGET_MAX); + if (work_done) + xmits = budget - 1; + else + xmits = budget; + } + + if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && + priv->eee_sw_timer_en) { + if (stmmac_enable_eee_mode(priv)) + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); + } + + /* We still have pending packets, let's call for a new scheduling */ + if (tx_q->dirty_tx != tx_q->cur_tx) + stmmac_tx_timer_arm(priv, queue); + + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); + + /* Combine decisions from TX clean and XSK TX */ + return max(count, xmits); +} + +/** + * stmmac_tx_err - to manage the tx error + * @priv: driver private structure + * @chan: channel index + * Description: it cleans the descriptors and restarts the transmission + * in case of transmission errors. + */ +static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); + + stmmac_stop_tx_dma(priv, chan); + dma_free_tx_skbufs(priv, &priv->dma_conf, chan); + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); + stmmac_reset_tx_queue(priv, chan); + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + stmmac_start_tx_dma(priv, chan); + + priv->dev->stats.tx_errors++; + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); +} + +/** + * stmmac_set_dma_operation_mode - Set DMA operation mode by channel + * @priv: driver private structure + * @txmode: TX operating mode + * @rxmode: RX operating mode + * @chan: channel index + * Description: it is used for configuring of the DMA operation mode in + * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward + * mode. 
+ */ +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, + u32 rxmode, u32 chan) +{ + u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + int rxfifosz = priv->plat->rx_fifo_size; + int txfifosz = priv->plat->tx_fifo_size; + + if (rxfifosz == 0) + rxfifosz = priv->dma_cap.rx_fifo_size; + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + /* Adjust for real per queue fifo size */ + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; + + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); +} + +static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) +{ + int ret; + + ret = stmmac_safety_feat_irq_status(priv, priv->dev, + priv->ioaddr, priv->dma_cap.asp, &priv->sstats); + if (ret && (ret != -EINVAL)) { + stmmac_global_err(priv); + return true; + } + + return false; +} + +static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) +{ + int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan, dir); + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + struct stmmac_channel *ch = &priv->channel[chan]; + struct napi_struct *rx_napi; + struct napi_struct *tx_napi; + unsigned long flags; + + rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; + tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; + + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { + if (napi_schedule_prep(rx_napi)) { + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + __napi_schedule(rx_napi); + } + } + + if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { + if (napi_schedule_prep(tx_napi)) { + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + __napi_schedule(tx_napi); + } + } + + return status; +} + +/** + * stmmac_dma_interrupt - DMA ISR + * @priv: driver private structure + * Description: this is the DMA ISR. It is called by the main ISR. + * It calls the dwmac dma routine and schedule poll method in case of some + * work can be done. + */ +static void stmmac_dma_interrupt(struct stmmac_priv *priv) +{ + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 rx_channel_count = priv->plat->rx_queues_to_use; + u32 channels_to_check = tx_channel_count > rx_channel_count ? + tx_channel_count : rx_channel_count; + u32 chan; + int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; + + /* Make sure we never check beyond our status buffer. 
*/ + if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) + channels_to_check = ARRAY_SIZE(status); + + for (chan = 0; chan < channels_to_check; chan++) + status[chan] = stmmac_napi_check(priv, chan, + DMA_DIR_RXTX); + + for (chan = 0; chan < tx_channel_count; chan++) { + if (unlikely(status[chan] & tx_hard_error_bump_tc)) { + /* Try to bump up the dma threshold on this failure */ + stmmac_bump_dma_threshold(priv, chan); + } else if (unlikely(status[chan] == tx_hard_error)) { + stmmac_tx_err(priv, chan); + } + } +} + +/** + * stmmac_mmc_setup: setup the Mac Management Counters (MMC) + * @priv: driver private structure + * Description: this masks the MMC irq, in fact, the counters are managed in SW. + */ +static void stmmac_mmc_setup(struct stmmac_priv *priv) +{ + unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | + MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; + + stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); + + if (priv->dma_cap.rmon) { + stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); + memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + } else + netdev_info(priv->dev, "No MAC Management Counters available\n"); +} + +/** + * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. + * @priv: driver private structure + * Description: + * new GMAC chip generations have a new register to indicate the + * presence of the optional feature/functions. + * This can be also used to override the value passed through the + * platform and necessary for old MAC10/100 and GMAC chips. + */ +static int stmmac_get_hw_features(struct stmmac_priv *priv) +{ + return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; +} + +/** + * stmmac_check_ether_addr - check if the MAC addr is valid + * @priv: driver private structure + * Description: + * it is to verify if the MAC address is valid, in case of failures it + * generates a random MAC address + */ +static void stmmac_check_ether_addr(struct stmmac_priv *priv) +{ + u8 addr[ETH_ALEN]; + + if (!is_valid_ether_addr(priv->dev->dev_addr)) { + stmmac_get_umac_addr(priv, priv->hw, addr, 0); + if (is_valid_ether_addr(addr)) + eth_hw_addr_set(priv->dev, addr); + else + eth_hw_addr_random(priv->dev); + dev_info(priv->device, "device MAC address %pM\n", + priv->dev->dev_addr); + } +} + +/** + * stmmac_init_dma_engine - DMA init. + * @priv: driver private structure + * Description: + * It inits the DMA invoking the specific MAC/GMAC callback. + * Some DMA parameters can be passed from the platform; + * in case of these are not passed a default is kept for the MAC or GMAC. 
+ */ +static int stmmac_init_dma_engine(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + u32 chan = 0; + int atds = 0; + int ret = 0; + + if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { + dev_err(priv->device, "Invalid DMA configuration\n"); + return -EINVAL; + } + + if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) + atds = 1; + + ret = stmmac_reset(priv, priv->ioaddr); + if (ret) { + dev_err(priv->device, "Failed to reset the dma\n"); + return ret; + } + + /* DMA Configuration */ + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + + if (priv->plat->axi) + stmmac_axi(priv, priv->ioaddr, priv->plat->axi); + + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) { + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } + + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_channels_count; chan++) { + rx_q = &priv->dma_conf.rx_queue[chan]; + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + } + + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_channels_count; chan++) { + tx_q = &priv->dma_conf.tx_queue[chan]; + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); + } + + return ret; +} + +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + u32 tx_coal_timer = priv->tx_coal_timer[queue]; + + if (!tx_coal_timer) + return; + + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(tx_coal_timer), + HRTIMER_MODE_REL); +} + +/** + * stmmac_tx_timer - mitigation sw timer for tx. + * @t: data pointer + * Description: + * This is the timer handler to directly invoke the stmmac_tx_clean. + */ +static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) +{ + struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); + struct stmmac_priv *priv = tx_q->priv_data; + struct stmmac_channel *ch; + struct napi_struct *napi; + + ch = &priv->channel[tx_q->queue_index]; + napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; + + if (likely(napi_schedule_prep(napi))) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + __napi_schedule(napi); + } + + return HRTIMER_NORESTART; +} + +/** + * stmmac_init_coalesce - init mitigation options. + * @priv: driver private structure + * Description: + * This inits the coalesce parameters: i.e. timer rate, + * timer handler and default threshold used for enabling the + * interrupt on completion bit. 
+ */ +static void stmmac_init_coalesce(struct stmmac_priv *priv) +{ + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 rx_channel_count = priv->plat->rx_queues_to_use; + u32 chan; + + for (chan = 0; chan < tx_channel_count; chan++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + + priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; + priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; + + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx_q->txtimer.function = stmmac_tx_timer; + } + + for (chan = 0; chan < rx_channel_count; chan++) + priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; +} + +static void stmmac_set_rings_length(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan; + + /* set TX ring length */ + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_set_tx_ring_len(priv, priv->ioaddr, + (priv->dma_conf.dma_tx_size - 1), chan); + + /* set RX ring length */ + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_set_rx_ring_len(priv, priv->ioaddr, + (priv->dma_conf.dma_rx_size - 1), chan); +} + +/** + * stmmac_set_tx_queue_weight - Set TX queue weight + * @priv: driver private structure + * Description: It is used for setting TX queues weight + */ +static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 weight; + u32 queue; + + for (queue = 0; queue < tx_queues_count; queue++) { + weight = priv->plat->tx_queues_cfg[queue].weight; + stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); + } +} + +/** + * stmmac_configure_cbs - Configure CBS in TX queue + * @priv: driver private structure + * Description: It is used for configuring CBS in AVB TX queues + */ +static void stmmac_configure_cbs(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 mode_to_use; + u32 queue; + + /* queue 0 is reserved for legacy traffic */ + for (queue = 1; queue < tx_queues_count; queue++) { + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB) + continue; + + stmmac_config_cbs(priv, priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + } +} + +/** + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel + * @priv: driver private structure + * Description: It is used for mapping RX queues to RX dma channels + */ +static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u32 chan; + + for (queue = 0; queue < rx_queues_count; queue++) { + chan = priv->plat->rx_queues_cfg[queue].chan; + stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); + } +} + +/** + * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority + * @priv: driver private structure + * Description: It is used for configuring the RX Queue Priority + */ +static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u32 prio; + + for (queue = 0; queue < rx_queues_count; queue++) { + if (!priv->plat->rx_queues_cfg[queue].use_prio) + continue; + + prio = priv->plat->rx_queues_cfg[queue].prio; + stmmac_rx_queue_prio(priv, priv->hw, prio, queue); + } +} + +/** + * stmmac_mac_config_tx_queues_prio - Configure TX Queue 
priority + * @priv: driver private structure + * Description: It is used for configuring the TX Queue Priority + */ +static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue; + u32 prio; + + for (queue = 0; queue < tx_queues_count; queue++) { + if (!priv->plat->tx_queues_cfg[queue].use_prio) + continue; + + prio = priv->plat->tx_queues_cfg[queue].prio; + stmmac_tx_queue_prio(priv, priv->hw, prio, queue); + } +} + +/** + * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing + * @priv: driver private structure + * Description: It is used for configuring the RX queue routing + */ +static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u8 packet; + + for (queue = 0; queue < rx_queues_count; queue++) { + /* no specific packet type routing specified for the queue */ + if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) + continue; + + packet = priv->plat->rx_queues_cfg[queue].pkt_route; + stmmac_rx_queue_routing(priv, priv->hw, packet, queue); + } +} + +static void stmmac_mac_config_rss(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.rssen || !priv->plat->rss_en) { + priv->rss.enable = false; + return; + } + + if (priv->dev->features & NETIF_F_RXHASH) + priv->rss.enable = true; + else + priv->rss.enable = false; + + stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); +} + +/** + * stmmac_mtl_configuration - Configure MTL + * @priv: driver private structure + * Description: It is used for configurring MTL + */ +static void stmmac_mtl_configuration(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 tx_queues_count = priv->plat->tx_queues_to_use; + + if (tx_queues_count > 1) + stmmac_set_tx_queue_weight(priv); + + /* Configure MTL RX algorithms */ + if (rx_queues_count > 1) + stmmac_prog_mtl_rx_algorithms(priv, priv->hw, + priv->plat->rx_sched_algorithm); + + /* Configure MTL TX algorithms */ + if (tx_queues_count > 1) + stmmac_prog_mtl_tx_algorithms(priv, priv->hw, + priv->plat->tx_sched_algorithm); + + /* Configure CBS in AVB TX queues */ + if (tx_queues_count > 1) + stmmac_configure_cbs(priv); + + /* Map RX MTL to DMA channels */ + stmmac_rx_queue_dma_chan_map(priv); + + /* Enable MAC RX Queues */ + stmmac_mac_enable_rx_queues(priv); + + /* Set RX priorities */ + if (rx_queues_count > 1) + stmmac_mac_config_rx_queues_prio(priv); + + /* Set TX priorities */ + if (tx_queues_count > 1) + stmmac_mac_config_tx_queues_prio(priv); + + /* Set RX routing */ + if (rx_queues_count > 1) + stmmac_mac_config_rx_queues_routing(priv); + + /* Receive Side Scaling */ + if (rx_queues_count > 1) + stmmac_mac_config_rss(priv); +} + +static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) +{ + if (priv->dma_cap.asp) { + netdev_info(priv->dev, "Enabling Safety Features\n"); + stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, + priv->plat->safety_feat_cfg); + } else { + netdev_info(priv->dev, "No Safety Features support found\n"); + } +} + +static int stmmac_fpe_start_wq(struct stmmac_priv *priv) +{ + char *name; + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); + clear_bit(__FPE_REMOVING, &priv->fpe_task_state); + + name = priv->wq_name; + sprintf(name, "%s-fpe", priv->dev->name); + + priv->fpe_wq = create_singlethread_workqueue(name); + if (!priv->fpe_wq) { + netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); + + 
return -ENOMEM; + } + netdev_info(priv->dev, "FPE workqueue start"); + + return 0; +} + +/** + * stmmac_hw_setup - setup mac in a usable state. + * @dev : pointer to the device structure. + * @ptp_register: register PTP if set + * Description: + * this is the main function to setup the HW in a usable state because the + * dma engine is reset, the core registers are configured (e.g. AXI, + * Checksum features, timers). The DMA is ready to start receiving and + * transmitting. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + bool sph_en; + u32 chan; + int ret; + + /* DMA initialization and SW reset */ + ret = stmmac_init_dma_engine(priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA engine initialization failed\n", + __func__); + return ret; + } + + /* Copy the MAC addr into the HW */ + stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); + + /* PS and related bits will be programmed according to the speed */ + if (priv->hw->pcs) { + int speed = priv->plat->mac_port_sel_speed; + + if ((speed == SPEED_10) || (speed == SPEED_100) || + (speed == SPEED_1000)) { + priv->hw->ps = speed; + } else { + dev_warn(priv->device, "invalid port speed\n"); + priv->hw->ps = 0; + } + } + + /* Initialize the MAC Core */ + stmmac_core_init(priv, priv->hw, dev); + + /* Initialize MTL*/ + stmmac_mtl_configuration(priv); + + /* Initialize Safety Features */ + stmmac_safety_feat_configuration(priv); + + ret = stmmac_rx_ipc(priv, priv->hw); + if (!ret) { + netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); + priv->plat->rx_coe = STMMAC_RX_COE_NONE; + priv->hw->rx_csum = 0; + } + + /* Enable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, true); + + /* Set the HW DMA mode and the COE */ + stmmac_dma_operation_mode(priv); + + stmmac_mmc_setup(priv); + + if (ptp_register) { + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + } + + ret = stmmac_init_ptp(priv); + if (ret == -EOPNOTSUPP) + netdev_info(priv->dev, "PTP not supported by HW\n"); + else if (ret) + netdev_warn(priv->dev, "PTP init failed\n"); + else if (ptp_register) + stmmac_ptp_register(priv); + + priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; + + /* Convert the timer from msec to usec */ + if (!priv->tx_lpi_timer) + priv->tx_lpi_timer = eee_timer * 1000; + + if (priv->use_riwt) { + u32 queue; + + for (queue = 0; queue < rx_cnt; queue++) { + if (!priv->rx_riwt[queue]) + priv->rx_riwt[queue] = DEF_DMA_RIWT; + + stmmac_rx_watchdog(priv, priv->ioaddr, + priv->rx_riwt[queue], queue); + } + } + + if (priv->hw->pcs) + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); + + /* set TX and RX rings length */ + stmmac_set_rings_length(priv); + + /* Enable TSO */ + if (priv->tso) { + for (chan = 0; chan < tx_cnt; chan++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + + /* TSO and TBS cannot co-exist */ + if (tx_q->tbs & STMMAC_TBS_AVAIL) + continue; + + stmmac_enable_tso(priv, priv->ioaddr, 1, chan); + } + } + + /* Enable Split Header */ + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + for (chan = 0; chan < rx_cnt; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + + + /* VLAN Tag Insertion */ + if (priv->dma_cap.vlins) 
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); + + /* TBS */ + for (chan = 0; chan < tx_cnt; chan++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; + int enable = tx_q->tbs & STMMAC_TBS_AVAIL; + + stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); + } + + /* Configure real RX and TX queues */ + netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); + netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); + + /* Start the ball rolling... */ + stmmac_start_all_dma(priv); + + if (priv->dma_cap.fpesel) { + stmmac_fpe_start_wq(priv); + + if (priv->plat->fpe_cfg->enable) + stmmac_fpe_handshake(priv, true); + } + + return 0; +} + +static void stmmac_hw_teardown(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + clk_disable_unprepare(priv->plat->clk_ptp_ref); +} + +static void stmmac_free_irq(struct net_device *dev, + enum request_irq_err irq_err, int irq_idx) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int j; + + switch (irq_err) { + case REQ_IRQ_ERR_ALL: + irq_idx = priv->plat->tx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_TX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->tx_irq[j] > 0) { + irq_set_affinity_hint(priv->tx_irq[j], NULL); + free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); + } + } + irq_idx = priv->plat->rx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_RX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->rx_irq[j] > 0) { + irq_set_affinity_hint(priv->rx_irq[j], NULL); + free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); + } + } + + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) + free_irq(priv->sfty_ue_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_UE: + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) + free_irq(priv->sfty_ce_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_CE: + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) + free_irq(priv->lpi_irq, dev); + fallthrough; + case REQ_IRQ_ERR_LPI: + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) + free_irq(priv->wol_irq, dev); + fallthrough; + case REQ_IRQ_ERR_WOL: + free_irq(dev->irq, dev); + fallthrough; + case REQ_IRQ_ERR_MAC: + case REQ_IRQ_ERR_NO: + /* If MAC IRQ request error, no more IRQ to free */ + break; + } +} + +static int stmmac_request_irq_multi_msi(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + enum request_irq_err irq_err; + cpumask_t cpu_mask; + int irq_idx = 0; + char *int_name; + int ret; + int i; + + /* For common interrupt */ + int_name = priv->int_name_mac; + sprintf(int_name, "%s:%s", dev->name, "mac"); + ret = request_irq(dev->irq, stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc mac MSI %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + goto irq_error; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + priv->wol_irq_disabled = true; + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + int_name = priv->int_name_wol; + sprintf(int_name, "%s:%s", dev->name, "wol"); + ret = request_irq(priv->wol_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc wol MSI %d (error: %d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + goto irq_error; + } + } + + /* Request the LPI IRQ in case of another line + * is used for LPI + */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + int_name = priv->int_name_lpi; + sprintf(int_name, "%s:%s", 
dev->name, "lpi"); + ret = request_irq(priv->lpi_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc lpi MSI %d (error: %d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + /* Request the Safety Feature Correctible Error line in + * case of another line is used + */ + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { + int_name = priv->int_name_sfty_ce; + sprintf(int_name, "%s:%s", dev->name, "safety-ce"); + ret = request_irq(priv->sfty_ce_irq, + stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ce MSI %d (error: %d)\n", + __func__, priv->sfty_ce_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_CE; + goto irq_error; + } + } + + /* Request the Safety Feature Uncorrectible Error line in + * case of another line is used + */ + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { + int_name = priv->int_name_sfty_ue; + sprintf(int_name, "%s:%s", dev->name, "safety-ue"); + ret = request_irq(priv->sfty_ue_irq, + stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ue MSI %d (error: %d)\n", + __func__, priv->sfty_ue_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_UE; + goto irq_error; + } + } + + /* Request Rx MSI irq */ + for (i = 0; i < priv->plat->rx_queues_to_use; i++) { + if (i >= MTL_MAX_RX_QUEUES) + break; + if (priv->rx_irq[i] == 0) + continue; + + int_name = priv->int_name_rx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); + ret = request_irq(priv->rx_irq[i], + stmmac_msi_intr_rx, + 0, int_name, &priv->dma_conf.rx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc rx-%d MSI %d (error: %d)\n", + __func__, i, priv->rx_irq[i], ret); + irq_err = REQ_IRQ_ERR_RX; + irq_idx = i; + goto irq_error; + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); + } + + /* Request Tx MSI irq */ + for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + if (i >= MTL_MAX_TX_QUEUES) + break; + if (priv->tx_irq[i] == 0) + continue; + + int_name = priv->int_name_tx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); + ret = request_irq(priv->tx_irq[i], + stmmac_msi_intr_tx, + 0, int_name, &priv->dma_conf.tx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc tx-%d MSI %d (error: %d)\n", + __func__, i, priv->tx_irq[i], ret); + irq_err = REQ_IRQ_ERR_TX; + irq_idx = i; + goto irq_error; + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, irq_idx); + return ret; +} + +static int stmmac_request_irq_single(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + enum request_irq_err irq_err; + int ret; + + ret = request_irq(dev->irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + goto irq_error; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + ret = request_irq(priv->wol_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the WoL IRQ %d 
(%d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + goto irq_error; + } + } + + /* Request the IRQ lines */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + ret = request_irq(priv->lpi_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, 0); + return ret; +} + +static int stmmac_request_irq(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + /* Request the IRQ lines */ + if (priv->plat->multi_msi_en) + ret = stmmac_request_irq_multi_msi(dev); + else + ret = stmmac_request_irq_single(dev); + + return ret; +} + +/** + * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue + * @priv: driver private structure + * @mtu: MTU to setup the dma queue and buf with + * Description: Allocate and generate a dma_conf based on the provided MTU. + * Allocate the Tx/Rx DMA queue and init them. + * Return value: + * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. + */ +static struct stmmac_dma_conf * +stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) +{ + struct stmmac_dma_conf *dma_conf; + int chan, bfsize, ret; + + dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); + if (!dma_conf) { + netdev_err(priv->dev, "%s: DMA conf allocation failed\n", + __func__); + return ERR_PTR(-ENOMEM); + } + + bfsize = stmmac_set_16kib_bfsize(priv, mtu); + if (bfsize < 0) + bfsize = 0; + + if (bfsize < BUF_SIZE_16KiB) + bfsize = stmmac_set_bfsize(mtu, 0); + + dma_conf->dma_buf_sz = bfsize; + /* Chose the tx/rx size from the already defined one in the + * priv struct. (if defined) + */ + dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; + dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; + + if (!dma_conf->dma_tx_size) + dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; + if (!dma_conf->dma_rx_size) + dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; + + /* Earlier check for TBS */ + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; + + /* Setup per-TXQ tbs flag before TX descriptor alloc */ + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; + } + + ret = alloc_dma_desc_resources(priv, dma_conf); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto alloc_error; + } + + ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } + + return dma_conf; + +init_error: + free_dma_desc_resources(priv, dma_conf); +alloc_error: + kfree(dma_conf); + return ERR_PTR(ret); +} + +/** + * __stmmac_open - open entry point of the driver + * @dev : pointer to the device structure. + * @dma_conf : structure to take the dma data + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. 
+ */ +static int __stmmac_open(struct net_device *dev, + struct stmmac_dma_conf *dma_conf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int mode = priv->plat->phy_interface; + u32 chan; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + if (priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI && + (!priv->hw->xpcs || + xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { + ret = stmmac_init_phy(dev); + if (ret) { + netdev_err(priv->dev, + "%s: Cannot attach to PHY (error: %d)\n", + __func__, ret); + goto init_phy_error; + } + } + + /* Extra statistics */ + memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); + priv->xstats.threshold = tc; + + priv->rx_copybreak = STMMAC_RX_COPYBREAK; + + buf_sz = dma_conf->dma_buf_sz; + for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) + if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) + dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; + memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); + + stmmac_reset_queues_param(priv); + + if (priv->plat->serdes_powerup) { + ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: Serdes powerup failed\n", + __func__); + goto init_error; + } + } + + ret = stmmac_hw_setup(dev, true); + if (ret < 0) { + netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); + goto init_error; + } + + stmmac_init_coalesce(priv); + + phylink_start(priv->phylink); + /* We may have called phylink_speed_down before */ + phylink_speed_up(priv->phylink); + + ret = stmmac_request_irq(dev); + if (ret) + goto irq_error; + + stmmac_enable_all_queues(priv); + netif_tx_start_all_queues(priv->dev); + stmmac_enable_all_dma_irq(priv); + + return 0; + +irq_error: + phylink_stop(priv->phylink); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + stmmac_hw_teardown(dev); +init_error: + phylink_disconnect_phy(priv->phylink); +init_phy_error: + pm_runtime_put(priv->device); + return ret; +} + +static int stmmac_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_dma_conf *dma_conf; + int ret; + + dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); + if (IS_ERR(dma_conf)) + return PTR_ERR(dma_conf); + + ret = __stmmac_open(dev, dma_conf); + if (ret) + free_dma_desc_resources(priv, dma_conf); + + kfree(dma_conf); + return ret; +} + +static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) +{ + set_bit(__FPE_REMOVING, &priv->fpe_task_state); + + if (priv->fpe_wq) { + destroy_workqueue(priv->fpe_wq); + priv->fpe_wq = NULL; + } + + netdev_info(priv->dev, "FPE workqueue stop"); +} + +/** + * stmmac_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. 
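+ * Return value:
+ * 0, unconditionally; the release path cannot fail.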
+ */ +static int stmmac_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; + + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); + /* Stop and disconnect the PHY */ + phylink_stop(priv->phylink); + phylink_disconnect_phy(priv->phylink); + + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + netif_tx_disable(dev); + + /* Free the IRQ lines */ + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); + + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + + /* Stop TX/RX DMA and clear the descriptors */ + stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv, &priv->dma_conf); + + /* Disable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, false); + + /* Powerdown Serdes if there is */ + if (priv->plat->serdes_powerdown) + priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); + + netif_carrier_off(dev); + + stmmac_release_ptp(priv); + + pm_runtime_put(priv->device); + + if (priv->dma_cap.fpesel) + stmmac_fpe_stop_wq(priv); + + return 0; +} + +static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, + struct stmmac_tx_queue *tx_q) +{ + u16 tag = 0x0, inner_tag = 0x0; + u32 inner_type = 0x0; + struct dma_desc *p; + + if (!priv->dma_cap.vlins) + return false; + if (!skb_vlan_tag_present(skb)) + return false; + if (skb->vlan_proto == htons(ETH_P_8021AD)) { + inner_tag = skb_vlan_tag_get(skb); + inner_type = STMMAC_VLAN_INSERT; + } + + tag = skb_vlan_tag_get(skb); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + p = &tx_q->dma_tx[tx_q->cur_tx]; + + if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) + return false; + + stmmac_set_tx_owner(priv, p); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + return true; +} + +/** + * stmmac_tso_allocator - close entry point of the driver + * @priv: driver private structure + * @des: buffer start address + * @total_len: total length to fill in descriptors + * @last_segment: condition for the last descriptor + * @queue: TX queue index + * Description: + * This function fills descriptor and request new descriptors according to + * buffer length to fill + */ +static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, + int total_len, bool last_segment, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct dma_desc *desc; + u32 buff_size; + int tmp_len; + + tmp_len = total_len; + + while (tmp_len > 0) { + dma_addr_t curr_addr; + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, + priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + desc = &tx_q->dma_tx[tx_q->cur_tx]; + + curr_addr = des + (total_len - tmp_len); + if (priv->dma_cap.addr64 <= 32) + desc->des0 = cpu_to_le32(curr_addr); + else + stmmac_set_desc_addr(priv, desc, curr_addr); + + buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 
+ TSO_MAX_BUFF_SIZE : tmp_len; + + stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, + 0, 1, + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), + 0, 0); + + tmp_len -= TSO_MAX_BUFF_SIZE; + } +} + +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + int desc_size; + + if (likely(priv->extend_desc)) + desc_size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc_size = sizeof(struct dma_edesc); + else + desc_size = sizeof(struct dma_desc); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. + */ + wmb(); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); +} + +/** + * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) + * @skb : the socket buffer + * @dev : device pointer + * Description: this is the transmit function that is called on TSO frames + * (support available on GMAC4 and newer chips). + * Diagram below show the ring programming in case of TSO frames: + * + * First Descriptor + * -------- + * | DES0 |---> buffer1 = L2/L3/L4 header + * | DES1 |---> TCP Payload (can continue on next descr...) + * | DES2 |---> buffer 1 and 2 len + * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] + * -------- + * | + * ... + * | + * -------- + * | DES0 | --| Split TCP Payload on Buffers 1 and 2 + * | DES1 | --| + * | DES2 | --> buffer 1 and 2 len + * | DES3 | + * -------- + * + * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. + */ +static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dma_desc *desc, *first, *mss_desc = NULL; + struct stmmac_priv *priv = netdev_priv(dev); + int nfrags = skb_shinfo(skb)->nr_frags; + u32 queue = skb_get_queue_mapping(skb); + unsigned int first_entry, tx_packets; + int tmp_pay_len = 0, first_tx; + struct stmmac_tx_queue *tx_q; + bool has_vlan, set_ic; + u8 proto_hdr_len, hdr; + u32 pay_len, mss; + dma_addr_t des; + int i; + + tx_q = &priv->dma_conf.tx_queue[queue]; + first_tx = tx_q->cur_tx; + + /* Compute header lengths */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); + hdr = sizeof(struct udphdr); + } else { + proto_hdr_len = skb_tcp_all_headers(skb); + hdr = tcp_hdrlen(skb); + } + + /* Desc availability based on threshold should be enough safe */ + if (unlikely(stmmac_tx_avail(priv, queue) < + (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, + queue)); + /* This is a hard error, log it. 
*/ + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", + __func__); + } + return NETDEV_TX_BUSY; + } + + pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ + + mss = skb_shinfo(skb)->gso_size; + + /* set new MSS value if needed */ + if (mss != tx_q->mss) { + if (tx_q->tbs & STMMAC_TBS_AVAIL) + mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; + + stmmac_set_mss(priv, mss_desc, mss); + tx_q->mss = mss; + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, + priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + } + + if (netif_msg_tx_queued(priv)) { + pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", + __func__, hdr, proto_hdr_len, pay_len, mss); + pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, + skb->data_len); + } + + /* Check if VLAN can be inserted by HW */ + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); + + first_entry = tx_q->cur_tx; + WARN_ON(tx_q->tx_skbuff[first_entry]); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[first_entry].basic; + else + desc = &tx_q->dma_tx[first_entry]; + first = desc; + + if (has_vlan) + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); + + /* first descriptor: fill Headers on Buf1 */ + des = dma_map_single(priv->device, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + + tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; + + if (priv->dma_cap.addr64 <= 32) { + first->des0 = cpu_to_le32(des); + + /* Fill start of payload in buff2 of first descriptor */ + if (pay_len) + first->des1 = cpu_to_le32(des + proto_hdr_len); + + /* If needed take extra descriptors to fill the remaining payload */ + tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; + } else { + stmmac_set_desc_addr(priv, first, des); + tmp_pay_len = pay_len; + des += proto_hdr_len; + pay_len = 0; + } + + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); + + /* Prepare fragments */ + for (i = 0; i < nfrags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + des = skb_frag_dma_map(priv->device, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + + stmmac_tso_allocator(priv, des, skb_frag_size(frag), + (i == nfrags - 1), queue); + + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; + } + + tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; + + /* Only the last descriptor gets to point to the skb. 
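+	 * The earlier descriptors carry only DMA mappings, so the skb is
+	 * freed exactly once, when this last descriptor is cleaned.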
*/ + tx_q->tx_skbuff[tx_q->cur_tx] = skb; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; + + /* Manage tx mitigation */ + tx_packets = (tx_q->cur_tx + 1) - first_tx; + tx_q->tx_count_frames += tx_packets; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) + set_ic = true; + else if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_packets > priv->tx_coal_frames[queue]) + set_ic = true; + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[tx_q->cur_tx].basic; + else + desc = &tx_q->dma_tx[tx_q->cur_tx]; + + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } + + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + dev->stats.tx_bytes += skb->len; + priv->xstats.tx_tso_frames++; + priv->xstats.tx_tso_nfrags += nfrags; + + if (priv->sarc_type) + stmmac_set_desc_sarc(priv, first, priv->sarc_type); + + skb_tx_timestamp(skb); + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + stmmac_enable_tx_timestamp(priv, first); + } + + /* Complete the first descriptor before granting the DMA */ + stmmac_prepare_tso_tx_desc(priv, first, 1, + proto_hdr_len, + pay_len, + 1, tx_q->tx_skbuff_dma[first_entry].last_segment, + hdr / 4, (skb->len - proto_hdr_len)); + + /* If context desc is used to change MSS */ + if (mss_desc) { + /* Make sure that first descriptor has been completely + * written, including its own bit. This is because MSS is + * actually before first descriptor, so we need to make + * sure that MSS's own bit is the last thing written. + */ + dma_wmb(); + stmmac_set_tx_owner(priv, mss_desc); + } + + if (netif_msg_pktdata(priv)) { + pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, + tx_q->cur_tx, first, nfrags); + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb_headlen(skb)); + } + + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); + + return NETDEV_TX_OK; + +dma_map_err: + dev_err(priv->device, "Tx dma map failed\n"); + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +/** + * stmmac_xmit - Tx entry point of the driver + * @skb : the socket buffer + * @dev : device pointer + * Description : this is the tx entry point of the driver. + * It programs the chain or the ring and supports oversized frames + * and SG feature. 
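+ * Return value:
+ * NETDEV_TX_OK, or NETDEV_TX_BUSY when no descriptors are available.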
+ */ +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned int first_entry, tx_packets, enh_desc; + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int nopaged_len = skb_headlen(skb); + int i, csum_insertion = 0, is_jumbo = 0; + u32 queue = skb_get_queue_mapping(skb); + int nfrags = skb_shinfo(skb)->nr_frags; + int gso = skb_shinfo(skb)->gso_type; + struct dma_edesc *tbs_desc = NULL; + struct dma_desc *desc, *first; + struct stmmac_tx_queue *tx_q; + bool has_vlan, set_ic; + int entry, first_tx; + dma_addr_t des; + + tx_q = &priv->dma_conf.tx_queue[queue]; + first_tx = tx_q->cur_tx; + + if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) + stmmac_disable_eee_mode(priv); + + /* Manage oversized TCP frames for GMAC4 device */ + if (skb_is_gso(skb) && priv->tso) { + if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + return stmmac_tso_xmit(skb, dev); + if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) + return stmmac_tso_xmit(skb, dev); + } + + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, + queue)); + /* This is a hard error, log it. */ + netdev_err(priv->dev, + "%s: Tx Ring full when queue awake\n", + __func__); + } + return NETDEV_TX_BUSY; + } + + /* Check if VLAN can be inserted by HW */ + has_vlan = stmmac_vlan_insert(priv, skb, tx_q); + + entry = tx_q->cur_tx; + first_entry = entry; + WARN_ON(tx_q->tx_skbuff[first_entry]); + + csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); + + if (likely(priv->extend_desc)) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[entry].basic; + else + desc = tx_q->dma_tx + entry; + + first = desc; + + if (has_vlan) + stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); + + enh_desc = priv->plat->enh_desc; + /* To program the descriptors according to the size of the frame */ + if (enh_desc) + is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); + + if (unlikely(is_jumbo)) { + entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); + if (unlikely(entry < 0) && (entry != -EINVAL)) + goto dma_map_err; + } + + for (i = 0; i < nfrags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int len = skb_frag_size(frag); + bool last_segment = (i == (nfrags - 1)); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[entry]); + + if (likely(priv->extend_desc)) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[entry].basic; + else + desc = tx_q->dma_tx + entry; + + des = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; /* should reuse desc w/o issues */ + + tx_q->tx_skbuff_dma[entry].buf = des; + + stmmac_set_desc_addr(priv, desc, des); + + tx_q->tx_skbuff_dma[entry].map_as_page = true; + tx_q->tx_skbuff_dma[entry].len = len; + tx_q->tx_skbuff_dma[entry].last_segment = last_segment; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; + + /* Prepare the descriptor and set the own bit too */ + stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, + priv->mode, 1, last_segment, skb->len); + } + + /* Only the last descriptor gets to point to the skb. 
*/ + tx_q->tx_skbuff[entry] = skb; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; + + /* According to the coalesce parameter the IC bit for the latest + * segment is reset and the timer re-started to clean the tx status. + * This approach takes care about the fragments: desc is the first + * element in case of no SG. + */ + tx_packets = (entry + 1) - first_tx; + tx_q->tx_count_frames += tx_packets; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) + set_ic = true; + else if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_packets > priv->tx_coal_frames[queue]) + set_ic = true; + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + if (likely(priv->extend_desc)) + desc = &tx_q->dma_etx[entry].basic; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc = &tx_q->dma_entx[entry].basic; + else + desc = &tx_q->dma_tx[entry]; + + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } + + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + tx_q->cur_tx = entry; + + if (netif_msg_pktdata(priv)) { + netdev_dbg(priv->dev, + "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, + entry, first, nfrags); + + netdev_dbg(priv->dev, ">>> frame to be transmitted: "); + print_pkt(skb->data, skb->len); + } + + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); + } + + dev->stats.tx_bytes += skb->len; + + if (priv->sarc_type) + stmmac_set_desc_sarc(priv, first, priv->sarc_type); + + skb_tx_timestamp(skb); + + /* Ready to fill the first descriptor and set the OWN bit w/o any + * problems because all the descriptors are actually ready to be + * passed to the DMA engine. 
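+	 * Setting the OWN bit of the first descriptor last prevents the DMA
+	 * from picking up a partially prepared chain.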
+ */ + if (likely(!is_jumbo)) { + bool last_segment = (nfrags == 0); + + des = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; + + tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + + stmmac_set_desc_addr(priv, first, des); + + tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; + tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + stmmac_enable_tx_timestamp(priv, first); + } + + /* Prepare the first descriptor setting the OWN bit too */ + stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, + csum_insertion, priv->mode, 0, last_segment, + skb->len); + } + + if (tx_q->tbs & STMMAC_TBS_EN) { + struct timespec64 ts = ns_to_timespec64(skb->tstamp); + + tbs_desc = &tx_q->dma_entx[first_entry]; + stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); + } + + stmmac_set_tx_owner(priv, first); + + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); + + return NETDEV_TX_OK; + +dma_map_err: + netdev_err(priv->dev, "Tx DMA map failed\n"); + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ + struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); + __be16 vlan_proto = veth->h_vlan_proto; + u16 vlanid; + + if ((vlan_proto == htons(ETH_P_8021Q) && + dev->features & NETIF_F_HW_VLAN_CTAG_RX) || + (vlan_proto == htons(ETH_P_8021AD) && + dev->features & NETIF_F_HW_VLAN_STAG_RX)) { + /* pop the vlan tag */ + vlanid = ntohs(veth->h_vlan_TCI); + memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); + } +} + +/** + * stmmac_rx_refill - refill used skb preallocated buffers + * @priv: driver private structure + * @queue: RX queue index + * Description : this is to reallocate the skb for the reception process + * that is based on zero-copy. 
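+ * Refilled descriptors are handed back to the hardware by setting their
+ * own bit and advancing the RX tail pointer.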
+ */ +static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int entry = rx_q->dirty_rx; + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + if (priv->dma_cap.host_dma_width <= 32) + gfp |= GFP_DMA32; + + while (dirty-- > 0) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; + struct dma_desc *p; + bool use_rx_wd; + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + if (!buf->page) { + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); + if (!buf->page) + break; + } + + if (priv->sph && !buf->sec_page) { + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); + if (!buf->sec_page) + break; + + buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); + } + + buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; + + stmmac_set_desc_addr(priv, p, buf->addr); + if (priv->sph) + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); + else + stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); + stmmac_refill_desc3(priv, rx_q, p); + + rx_q->rx_count_frames++; + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) + rx_q->rx_count_frames = 0; + + use_rx_wd = !priv->rx_coal_frames[queue]; + use_rx_wd |= rx_q->rx_count_frames > 0; + if (!priv->use_riwt) + use_rx_wd = false; + + dma_wmb(); + stmmac_set_rx_owner(priv, p, use_rx_wd); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); + } + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->dirty_rx * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); +} + +static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, + struct dma_desc *p, + int status, unsigned int len) +{ + unsigned int plen = 0, hlen = 0; + int coe = priv->hw->rx_csum; + + /* Not first descriptor, buffer is always zero */ + if (priv->sph && len) + return 0; + + /* First descriptor, get split header length */ + stmmac_get_rx_header_len(priv, p, &hlen); + if (priv->sph && hlen) { + priv->xstats.rx_split_hdr_pkt_n++; + return hlen; + } + + /* First descriptor, not last descriptor and not split header */ + if (status & rx_not_ls) + return priv->dma_conf.dma_buf_sz; + + plen = stmmac_get_rx_frame_len(priv, p, coe); + + /* First descriptor and last descriptor and not split header */ + return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); +} + +static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, + struct dma_desc *p, + int status, unsigned int len) +{ + int coe = priv->hw->rx_csum; + unsigned int plen = 0; + + /* Not split header, buffer is not available */ + if (!priv->sph) + return 0; + + /* Not last descriptor */ + if (status & rx_not_ls) + return priv->dma_conf.dma_buf_sz; + + plen = stmmac_get_rx_frame_len(priv, p, coe); + + /* Last descriptor */ + return plen - len; +} + +static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, + struct xdp_frame *xdpf, bool dma_map) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc; + dma_addr_t dma_addr; + bool set_ic; + + if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) + return STMMAC_XDP_CONSUMED; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = 
&tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + if (dma_map) { + dma_addr = dma_map_single(priv->device, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) + return STMMAC_XDP_CONSUMED; + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; + } else { + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + + xdpf->headroom; + dma_sync_single_for_device(priv->device, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; + } + + tx_q->tx_skbuff_dma[entry].buf = dma_addr; + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdpf->len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + tx_q->xdpf[entry] = xdpf; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, + true, priv->mode, true, true, + xdpf->len); + + tx_q->tx_count_frames++; + + if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); + tx_q->cur_tx = entry; + + return STMMAC_XDP_TX; +} + +static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= priv->plat->tx_queues_to_use) + index -= priv->plat->tx_queues_to_use; + + return index; +} + +static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int queue; + int res; + + if (unlikely(!xdpf)) + return STMMAC_XDP_CONSUMED; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with slow path */ + txq_trans_cond_update(nq); + + res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); + if (res == STMMAC_XDP_TX) + stmmac_flush_tx_descriptors(priv, queue); + + __netif_tx_unlock(nq); + + return res; +} + +static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act; + int res; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + res = STMMAC_XDP_PASS; + break; + case XDP_TX: + res = stmmac_xdp_xmit_back(priv, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(priv->dev, xdp, prog) < 0) + res = STMMAC_XDP_CONSUMED; + else + res = STMMAC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(priv->dev, prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->dev, prog, act); + fallthrough; + case XDP_DROP: + res = STMMAC_XDP_CONSUMED; + break; + } + + return res; +} + +static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(priv->xdp_prog); + if (!prog) { + res = STMMAC_XDP_PASS; + goto out; + } + + res = __stmmac_xdp_run_prog(priv, prog, xdp); +out: + return ERR_PTR(-res); +} + +static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, + int xdp_status) +{ + int cpu = smp_processor_id(); + int queue; + + queue = 
stmmac_xdp_get_tx_queue(priv, cpu); + + if (xdp_status & STMMAC_XDP_TX) + stmmac_tx_timer_arm(priv, queue); + + if (xdp_status & STMMAC_XDP_REDIRECT) + xdp_do_flush(); +} + +static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + return skb; +} + +static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, + struct dma_desc *p, struct dma_desc *np, + struct xdp_buff *xdp) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int len = xdp->data_end - xdp->data; + enum pkt_hash_types hash_type; + int coe = priv->hw->rx_csum; + struct sk_buff *skb; + u32 hash; + + skb = stmmac_construct_skb_zc(ch, xdp); + if (!skb) { + priv->dev->stats.rx_dropped++; + return; + } + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rxtx_napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; +} + +static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + unsigned int entry = rx_q->dirty_rx; + struct dma_desc *rx_desc = NULL; + bool ret = true; + + budget = min(budget, stmmac_rx_dirty(priv, queue)); + + while (budget-- > 0 && entry != rx_q->cur_rx) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; + dma_addr_t dma_addr; + bool use_rx_wd; + + if (!buf->xdp) { + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) { + ret = false; + break; + } + } + + if (priv->extend_desc) + rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); + else + rx_desc = rx_q->dma_rx + entry; + + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, rx_desc, dma_addr); + stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); + stmmac_refill_desc3(priv, rx_q, rx_desc); + + rx_q->rx_count_frames++; + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) + rx_q->rx_count_frames = 0; + + use_rx_wd = !priv->rx_coal_frames[queue]; + use_rx_wd |= rx_q->rx_count_frames > 0; + if (!priv->use_riwt) + use_rx_wd = false; + + dma_wmb(); + stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); + } + + if (rx_desc) { + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->dirty_rx * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); + } + + return ret; +} + +static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + unsigned int count = 0, error = 0, len = 0; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int next_entry = rx_q->cur_rx; + unsigned int desc_size; + struct bpf_prog *prog; + 
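+	/* failure is set when the XSK pool runs out of buffers during refill */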
bool failure = false; + int xdp_status = 0; + int status = 0; + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) { + rx_head = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } + while (count < limit) { + struct stmmac_rx_buffer *buf; + unsigned int buf1_len = 0; + struct dma_desc *np, *p; + int entry; + int res; + + if (!count && rx_q->state_saved) { + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + error = 0; + len = 0; + } + + if (count >= limit) + break; + +read_again: + buf1_len = 0; + entry = next_entry; + buf = &rx_q->buf_pool[entry]; + + if (dirty >= STMMAC_RX_FILL_BATCH) { + failure = failure || + !stmmac_rx_refill_zc(priv, queue, dirty); + dirty = 0; + } + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) + break; + + /* Prefetch the next RX descriptor */ + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + next_entry = rx_q->cur_rx; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else + np = rx_q->dma_rx + next_entry; + + prefetch(np); + + /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, + rx_q->dma_erx + entry); + if (unlikely(status == discard_frame)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } + + if (unlikely(error && (status & rx_not_ls))) + goto read_again; + if (unlikely(error)) { + count++; + continue; + } + + /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ + if (likely(status & rx_not_ls)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + count++; + goto read_again; + } + + /* XDP ZC Frame only support primary buffers for now */ + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + + /* ACS is disabled; strip manually. 
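+		 * Only the last buffer of a frame carries the 4-byte FCS, so it
+		 * is trimmed from buf1_len here.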
*/ + if (likely(!(status & rx_not_ls))) { + buf1_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + + /* RX buffer is good and fit into a XSK pool buffer */ + buf->xdp->data_end = buf->xdp->data + buf1_len; + xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); + + prog = READ_ONCE(priv->xdp_prog); + res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); + + switch (res) { + case STMMAC_XDP_PASS: + stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); + xsk_buff_free(buf->xdp); + break; + case STMMAC_XDP_CONSUMED: + xsk_buff_free(buf->xdp); + priv->dev->stats.rx_dropped++; + break; + case STMMAC_XDP_TX: + case STMMAC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + buf->xdp = NULL; + dirty++; + count++; + } + + if (status & rx_not_ls) { + rx_q->state_saved = true; + rx_q->state.error = error; + rx_q->state.len = len; + } + + stmmac_finalize_xdp_rx(priv, xdp_status); + + priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; + + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { + if (failure || stmmac_rx_dirty(priv, queue) > 0) + xsk_set_rx_need_wakeup(rx_q->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_q->xsk_pool); + + return (int)count; + } + + return failure ? limit : (int)count; +} + +/** + * stmmac_rx - manage the receive process + * @priv: driver private structure + * @limit: napi bugget + * @queue: RX queue index. + * Description : this the function called by the napi poll method. + * It gets all the frames inside the ring. + */ +static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int count = 0, error = 0, len = 0; + int status = 0, coe = priv->hw->rx_csum; + unsigned int next_entry = rx_q->cur_rx; + enum dma_data_direction dma_dir; + unsigned int desc_size; + struct sk_buff *skb = NULL; + struct xdp_buff xdp; + int xdp_status = 0; + int buf_sz; + + dma_dir = page_pool_get_dma_dir(rx_q->page_pool); + buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) { + rx_head = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } + while (count < limit) { + unsigned int buf1_len = 0, buf2_len = 0; + enum pkt_hash_types hash_type; + struct stmmac_rx_buffer *buf; + struct dma_desc *np, *p; + int entry; + u32 hash; + + if (!count && rx_q->state_saved) { + skb = rx_q->state.skb; + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + skb = NULL; + error = 0; + len = 0; + } + +read_again: + if (count >= limit) + break; + + buf1_len = 0; + buf2_len = 0; + entry = next_entry; + buf = &rx_q->buf_pool[entry]; + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) + break; + + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + next_entry = rx_q->cur_rx; + + if 
(priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else + np = rx_q->dma_rx + next_entry; + + prefetch(np); + + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, rx_q->dma_erx + entry); + if (unlikely(status == discard_frame)) { + page_pool_recycle_direct(rx_q->page_pool, buf->page); + buf->page = NULL; + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } + + if (unlikely(error && (status & rx_not_ls))) + goto read_again; + if (unlikely(error)) { + dev_kfree_skb(skb); + skb = NULL; + count++; + continue; + } + + /* Buffer is good. Go on. */ + + prefetch(page_address(buf->page) + buf->page_offset); + if (buf->sec_page) + prefetch(page_address(buf->sec_page)); + + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + buf2_len = stmmac_rx_buf2_len(priv, p, status, len); + len += buf2_len; + + /* ACS is disabled; strip manually. */ + if (likely(!(status & rx_not_ls))) { + if (buf2_len) { + buf2_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } else if (buf1_len) { + buf1_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + } + + if (!skb) { + unsigned int pre_len, sync_len; + + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + + xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq); + xdp_prepare_buff(&xdp, page_address(buf->page), + buf->page_offset, buf1_len, false); + + pre_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + skb = stmmac_xdp_run_prog(priv, &xdp); + /* Due xdp_adjust_tail: DMA sync for_device + * cover max len CPU touch + */ + sync_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + sync_len = max(sync_len, pre_len); + + /* For Not XDP_PASS verdict */ + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & STMMAC_XDP_CONSUMED) { + page_pool_put_page(rx_q->page_pool, + virt_to_head_page(xdp.data), + sync_len, true); + buf->page = NULL; + priv->dev->stats.rx_dropped++; + + /* Clear skb as it was set as + * status by XDP program. 
+ */ + skb = NULL; + + if (unlikely((status & rx_not_ls))) + goto read_again; + + count++; + continue; + } else if (xdp_res & (STMMAC_XDP_TX | + STMMAC_XDP_REDIRECT)) { + xdp_status |= xdp_res; + buf->page = NULL; + skb = NULL; + count++; + continue; + } + } + } + + if (!skb) { + /* XDP program may expand or reduce tail */ + buf1_len = xdp.data_end - xdp.data; + + skb = napi_alloc_skb(&ch->rx_napi, buf1_len); + if (!skb) { + priv->dev->stats.rx_dropped++; + count++; + goto drain_data; + } + + /* XDP program may adjust header */ + skb_copy_to_linear_data(skb, xdp.data, buf1_len); + skb_put(skb, buf1_len); + + /* Data payload copied into SKB, page ready for recycle */ + page_pool_recycle_direct(rx_q->page_pool, buf->page); + buf->page = NULL; + } else if (buf1_len) { + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf->page, buf->page_offset, buf1_len, + priv->dma_conf.dma_buf_sz); + + /* Data payload appended into SKB */ + page_pool_release_page(rx_q->page_pool, buf->page); + buf->page = NULL; + } + + if (buf2_len) { + dma_sync_single_for_cpu(priv->device, buf->sec_addr, + buf2_len, dma_dir); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf->sec_page, 0, buf2_len, + priv->dma_conf.dma_buf_sz); + + /* Data payload appended into SKB */ + page_pool_release_page(rx_q->page_pool, buf->sec_page); + buf->sec_page = NULL; + } + +drain_data: + if (likely(status & rx_not_ls)) + goto read_again; + if (!skb) + continue; + + /* Got entire packet into SKB. Finish it. */ + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rx_napi, skb); + skb = NULL; + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; + count++; + } + + if (status & rx_not_ls || skb) { + rx_q->state_saved = true; + rx_q->state.skb = skb; + rx_q->state.error = error; + rx_q->state.len = len; + } + + stmmac_finalize_xdp_rx(priv, xdp_status); + + stmmac_rx_refill(priv, queue); + + priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; + + return count; +} + +static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, rx_napi); + struct stmmac_priv *priv = ch->priv_data; + u32 chan = ch->index; + int work_done; + + priv->xstats.napi_poll++; + + work_done = stmmac_rx(priv, budget, chan); + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return work_done; +} + +static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, tx_napi); + struct stmmac_priv *priv = ch->priv_data; + u32 chan = ch->index; + int work_done; + + priv->xstats.napi_poll++; + + work_done = stmmac_tx_clean(priv, budget, chan); + work_done = min(work_done, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); + 
spin_unlock_irqrestore(&ch->lock, flags); + } + + return work_done; +} + +static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, rxtx_napi); + struct stmmac_priv *priv = ch->priv_data; + int rx_done, tx_done, rxtx_done; + u32 chan = ch->index; + + priv->xstats.napi_poll++; + + tx_done = stmmac_tx_clean(priv, budget, chan); + tx_done = min(tx_done, budget); + + rx_done = stmmac_rx_zc(priv, budget, chan); + + rxtx_done = max(tx_done, rx_done); + + /* If either TX or RX work is not complete, return budget + * and keep polling + */ + if (rxtx_done >= budget) + return budget; + + /* all work done, exit the polling mode */ + if (napi_complete_done(napi, rxtx_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + /* Both RX and TX work are complete, + * so enable both RX & TX IRQs. + */ + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return min(rxtx_done, budget - 1); +} + +/** + * stmmac_tx_timeout + * @dev : Pointer to net device structure + * @txqueue: the index of the hanging transmit queue + * Description: this function is called when a packet transmission fails to + * complete within a reasonable time. The driver will mark the error in the + * netdev structure and arrange for the device to be reset to a sane state + * in order to transmit a new packet. + */ +static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + stmmac_global_err(priv); +} + +/** + * stmmac_set_rx_mode - entry point for multicast addressing + * @dev : pointer to the device structure + * Description: + * This function is a driver entry point which gets called by the kernel + * whenever multicast addresses must be enabled/disabled. + * Return value: + * void. + */ +static void stmmac_set_rx_mode(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + stmmac_set_filter(priv, priv->hw, dev); +} + +/** + * stmmac_change_mtu - entry point to change MTU size for the device. + * @dev : device pointer. + * @new_mtu : the new MTU size for the device. + * Description: the Maximum Transfer Unit (MTU) is used by the network layer + * to drive packet transmission. Ethernet has an MTU of 1500 octets + * (ETH_DATA_LEN). This value can be changed with ifconfig. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure.
+ */ +static int stmmac_change_mtu(struct net_device *dev, int new_mtu) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int txfifosz = priv->plat->tx_fifo_size; + struct stmmac_dma_conf *dma_conf; + const int mtu = new_mtu; + int ret; + + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + txfifosz /= priv->plat->tx_queues_to_use; + + if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); + return -EINVAL; + } + + new_mtu = STMMAC_ALIGN(new_mtu); + + /* If condition true, FIFO is too small or MTU too large */ + if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) + return -EINVAL; + + if (netif_running(dev)) { + netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); + /* Try to allocate the new DMA conf with the new mtu */ + dma_conf = stmmac_setup_dma_desc(priv, mtu); + if (IS_ERR(dma_conf)) { + netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", + mtu); + return PTR_ERR(dma_conf); + } + + stmmac_release(dev); + + ret = __stmmac_open(dev, dma_conf); + if (ret) { + free_dma_desc_resources(priv, dma_conf); + kfree(dma_conf); + netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); + return ret; + } + + kfree(dma_conf); + + stmmac_set_rx_mode(dev); + } + + dev->mtu = mtu; + netdev_update_features(dev); + + return 0; +} + +static netdev_features_t stmmac_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) + features &= ~NETIF_F_RXCSUM; + + if (!priv->plat->tx_coe) + features &= ~NETIF_F_CSUM_MASK; + + /* Some GMAC devices have a bugged Jumbo frame support that + * needs to have the Tx COE disabled for oversized frames + * (due to limited buffer sizes). In this case we disable + * the TX csum insertion in the TDES and not use SF. + */ + if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) + features &= ~NETIF_F_CSUM_MASK; + + /* Disable tso if asked by ethtool */ + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + if (features & NETIF_F_TSO) + priv->tso = true; + else + priv->tso = false; + } + + return features; +} + +static int stmmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + /* Keep the COE Type in case of csum is supporting */ + if (features & NETIF_F_RXCSUM) + priv->hw->rx_csum = priv->plat->rx_coe; + else + priv->hw->rx_csum = 0; + /* No check needed because rx_coe has been set before and it will be + * fixed in case of issue. 
+ */ + stmmac_rx_ipc(priv, priv->hw); + + if (priv->sph_cap) { + bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; + u32 chan; + + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } + + return 0; +} + +static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (status == FPE_EVENT_UNKNOWN || !*hs_enable) + return; + + /* If LP has sent verify mPacket, LP is FPE capable */ + if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { + if (*lp_state < FPE_STATE_CAPABLE) + *lp_state = FPE_STATE_CAPABLE; + + /* If user has requested FPE enable, quickly response */ + if (*hs_enable) + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, + MPACKET_RESPONSE); + } + + /* If Local has sent verify mPacket, Local is FPE capable */ + if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { + if (*lo_state < FPE_STATE_CAPABLE) + *lo_state = FPE_STATE_CAPABLE; + } + + /* If LP has sent response mPacket, LP is entering FPE ON */ + if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) + *lp_state = FPE_STATE_ENTERING_ON; + + /* If Local has sent response mPacket, Local is entering FPE ON */ + if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) + *lo_state = FPE_STATE_ENTERING_ON; + + if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && + !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && + priv->fpe_wq) { + queue_work(priv->fpe_wq, &priv->fpe_task); + } +} + +static void stmmac_common_interrupt(struct stmmac_priv *priv) +{ + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queues_count; + u32 queue; + bool xmac; + + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; + + if (priv->irq_wake) + pm_wakeup_event(priv->device, 0); + + if (priv->dma_cap.estsel) + stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, + &priv->xstats, tx_cnt); + + if (priv->dma_cap.fpesel) { + int status = stmmac_fpe_irq_status(priv, priv->ioaddr, + priv->dev); + + stmmac_fpe_event_status(priv, status); + } + + /* To handle GMAC own interrupts */ + if ((priv->plat->has_gmac) || xmac) { + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); + + if (unlikely(status)) { + /* For LPI we need to save the tx status */ + if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) + priv->tx_path_in_lpi_mode = true; + if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) + priv->tx_path_in_lpi_mode = false; + } + + for (queue = 0; queue < queues_count; queue++) { + status = stmmac_host_mtl_irq_status(priv, priv->hw, + queue); + } + + /* PCS link status */ + if (priv->hw->pcs) { + if (priv->xstats.pcs_link) + netif_carrier_on(priv->dev); + else + netif_carrier_off(priv->dev); + } + + stmmac_timestamp_interrupt(priv, priv); + } +} + +/** + * stmmac_interrupt - main ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the main driver interrupt service routine. + * It can call: + * o DMA service routine (to manage incoming frame reception and transmission + * status) + * o Core interrupts to manage: remote wake-up, management counter, LPI + * interrupts. 
+ */ +static irqreturn_t stmmac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + if (stmmac_safety_feat_interrupt(priv)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); + + /* To handle DMA interrupts */ + stmmac_dma_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + stmmac_safety_feat_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; + struct stmmac_dma_conf *dma_conf; + int chan = tx_q->queue_index; + struct stmmac_priv *priv; + int status; + + dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); + priv = container_of(dma_conf, struct stmmac_priv, dma_conf); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + status = stmmac_napi_check(priv, chan, DMA_DIR_TX); + + if (unlikely(status & tx_hard_error_bump_tc)) { + /* Try to bump up the dma threshold on this failure */ + stmmac_bump_dma_threshold(priv, chan); + } else if (unlikely(status == tx_hard_error)) { + stmmac_tx_err(priv, chan); + } + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) +{ + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; + struct stmmac_dma_conf *dma_conf; + int chan = rx_q->queue_index; + struct stmmac_priv *priv; + + dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); + priv = container_of(dma_conf, struct stmmac_priv, dma_conf); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + stmmac_napi_check(priv, chan, DMA_DIR_RX); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling receive - used by NETCONSOLE and other diagnostic tools + * to allow network I/O with interrupts disabled. + */ +static void stmmac_poll_controller(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + /* If adapter is down, do nothing */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return; + + if (priv->plat->multi_msi_en) { + for (i = 0; i < priv->plat->rx_queues_to_use; i++) + stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); + + for (i = 0; i < priv->plat->tx_queues_to_use; i++) + stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); + } else { + disable_irq(dev->irq); + stmmac_interrupt(dev->irq, dev); + enable_irq(dev->irq); + } +} +#endif + +/** + * stmmac_ioctl - Entry point for the Ioctl + * @dev: Device pointer. 
+ * @rq: An IOCTL-specific structure that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * @cmd: IOCTL command + * Description: + * Currently it supports the phy_mii_ioctl(...) and HW time stamping. + */ +static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + ret = phylink_mii_ioctl(priv->phylink, rq, cmd); + break; + case SIOCSHWTSTAMP: + ret = stmmac_hwtstamp_set(dev, rq); + break; + case SIOCGHWTSTAMP: + ret = stmmac_hwtstamp_get(dev, rq); + break; + default: + break; + } + + return ret; +} + +static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct stmmac_priv *priv = cb_priv; + int ret = -EOPNOTSUPP; + + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) + return ret; + + __stmmac_disable_all_queues(priv); + + switch (type) { + case TC_SETUP_CLSU32: + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); + break; + case TC_SETUP_CLSFLOWER: + ret = stmmac_tc_setup_cls(priv, priv, type_data); + break; + default: + break; + } + + stmmac_enable_all_queues(priv); + return ret; +} + +static LIST_HEAD(stmmac_block_cb_list); + +static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &stmmac_block_cb_list, + stmmac_setup_tc_block_cb, + priv, priv, true); + case TC_SETUP_QDISC_CBS: + return stmmac_tc_setup_cbs(priv, priv, type_data); + case TC_SETUP_QDISC_TAPRIO: + return stmmac_tc_setup_taprio(priv, priv, type_data); + case TC_SETUP_QDISC_ETF: + return stmmac_tc_setup_etf(priv, priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + int gso = skb_shinfo(skb)->gso_type; + + if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { + /* + * There is no way to determine the number of TSO/USO + * capable Queues. Let's always use Queue 0 + * because if TSO/USO is supported then at least this + * one will be capable.
+ */ + return 0; + } + + return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; +} + +static int stmmac_set_mac_address(struct net_device *ndev, void *addr) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + int ret = 0; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + ret = eth_mac_addr(ndev, addr); + if (ret) + goto set_mac_error; + + stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); + +set_mac_error: + pm_runtime_put(priv->device); + + return ret; +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *stmmac_fs_dir; + +static void sysfs_display_ring(void *head, int size, int extend_desc, + struct seq_file *seq, dma_addr_t dma_phy_addr) +{ + int i; + struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + struct dma_desc *p = (struct dma_desc *)head; + dma_addr_t dma_addr; + + for (i = 0; i < size; i++) { + if (extend_desc) { + dma_addr = dma_phy_addr + i * sizeof(*ep); + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(ep->basic.des0), + le32_to_cpu(ep->basic.des1), + le32_to_cpu(ep->basic.des2), + le32_to_cpu(ep->basic.des3)); + ep++; + } else { + dma_addr = dma_phy_addr + i * sizeof(*p); + seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", + i, &dma_addr, + le32_to_cpu(p->des0), le32_to_cpu(p->des1), + le32_to_cpu(p->des2), le32_to_cpu(p->des3)); + p++; + } + seq_printf(seq, "\n"); + } +} + +static int stmmac_rings_status_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + + if ((dev->flags & IFF_UP) == 0) + return 0; + + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + + seq_printf(seq, "RX Queue %d:\n", queue); + + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_erx, + priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); + } else { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_rx, + priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); + } + } + + for (queue = 0; queue < tx_count; queue++) { + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + + seq_printf(seq, "TX Queue %d:\n", queue); + + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_etx, + priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); + } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_tx, + priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); + } + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); + +static int stmmac_dma_cap_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->hw_cap_support) { + seq_printf(seq, "DMA HW features not supported\n"); + return 0; + } + + seq_printf(seq, "==============================\n"); + seq_printf(seq, "\tDMA HW features\n"); + seq_printf(seq, "==============================\n"); + + seq_printf(seq, "\t10/100 Mbps: %s\n", + (priv->dma_cap.mbps_10_100) ? "Y" : "N"); + seq_printf(seq, "\t1000 Mbps: %s\n", + (priv->dma_cap.mbps_1000) ? "Y" : "N"); + seq_printf(seq, "\tHalf duplex: %s\n", + (priv->dma_cap.half_duplex) ? 
"Y" : "N"); + seq_printf(seq, "\tHash Filter: %s\n", + (priv->dma_cap.hash_filter) ? "Y" : "N"); + seq_printf(seq, "\tMultiple MAC address registers: %s\n", + (priv->dma_cap.multi_addr) ? "Y" : "N"); + seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", + (priv->dma_cap.pcs) ? "Y" : "N"); + seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", + (priv->dma_cap.sma_mdio) ? "Y" : "N"); + seq_printf(seq, "\tPMT Remote wake up: %s\n", + (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); + seq_printf(seq, "\tPMT Magic Frame: %s\n", + (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); + seq_printf(seq, "\tRMON module: %s\n", + (priv->dma_cap.rmon) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", + (priv->dma_cap.time_stamp) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", + (priv->dma_cap.atime_stamp) ? "Y" : "N"); + seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", + (priv->dma_cap.eee) ? "Y" : "N"); + seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); + seq_printf(seq, "\tChecksum Offload in TX: %s\n", + (priv->dma_cap.tx_coe) ? "Y" : "N"); + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", + (priv->dma_cap.rx_coe) ? "Y" : "N"); + } else { + seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", + (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", + (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + } + seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", + (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); + seq_printf(seq, "\tNumber of Additional RX channel: %d\n", + priv->dma_cap.number_rx_channel); + seq_printf(seq, "\tNumber of Additional TX channel: %d\n", + priv->dma_cap.number_tx_channel); + seq_printf(seq, "\tNumber of Additional RX queues: %d\n", + priv->dma_cap.number_rx_queues); + seq_printf(seq, "\tNumber of Additional TX queues: %d\n", + priv->dma_cap.number_tx_queues); + seq_printf(seq, "\tEnhanced descriptors: %s\n", + (priv->dma_cap.enh_desc) ? "Y" : "N"); + seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); + seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); + seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); + seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); + seq_printf(seq, "\tNumber of PPS Outputs: %d\n", + priv->dma_cap.pps_out_num); + seq_printf(seq, "\tSafety Features: %s\n", + priv->dma_cap.asp ? "Y" : "N"); + seq_printf(seq, "\tFlexible RX Parser: %s\n", + priv->dma_cap.frpsel ? "Y" : "N"); + seq_printf(seq, "\tEnhanced Addressing: %d\n", + priv->dma_cap.host_dma_width); + seq_printf(seq, "\tReceive Side Scaling: %s\n", + priv->dma_cap.rssen ? "Y" : "N"); + seq_printf(seq, "\tVLAN Hash Filtering: %s\n", + priv->dma_cap.vlhash ? "Y" : "N"); + seq_printf(seq, "\tSplit Header: %s\n", + priv->dma_cap.sphen ? "Y" : "N"); + seq_printf(seq, "\tVLAN TX Insertion: %s\n", + priv->dma_cap.vlins ? "Y" : "N"); + seq_printf(seq, "\tDouble VLAN: %s\n", + priv->dma_cap.dvlan ? "Y" : "N"); + seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", + priv->dma_cap.l3l4fnum); + seq_printf(seq, "\tARP Offloading: %s\n", + priv->dma_cap.arpoffsel ? "Y" : "N"); + seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", + priv->dma_cap.estsel ? "Y" : "N"); + seq_printf(seq, "\tFrame Preemption (FPE): %s\n", + priv->dma_cap.fpesel ? "Y" : "N"); + seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", + priv->dma_cap.tbssel ? 
"Y" : "N"); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); + +/* Use network device events to rename debugfs file entries. + */ +static int stmmac_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct stmmac_priv *priv = netdev_priv(dev); + + if (dev->netdev_ops != &stmmac_netdev_ops) + goto done; + + switch (event) { + case NETDEV_CHANGENAME: + if (priv->dbgfs_dir) + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, + priv->dbgfs_dir, + stmmac_fs_dir, + dev->name); + break; + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block stmmac_notifier = { + .notifier_call = stmmac_device_event, +}; + +static void stmmac_init_fs(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + rtnl_lock(); + + /* Create per netdev entries */ + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); + + /* Entry to report DMA RX/TX rings */ + debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, + &stmmac_rings_status_fops); + + /* Entry to report the DMA HW features */ + debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, + &stmmac_dma_cap_fops); + + rtnl_unlock(); +} + +static void stmmac_exit_fs(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + debugfs_remove_recursive(priv->dbgfs_dir); +} +#endif /* CONFIG_DEBUG_FS */ + +static u32 stmmac_vid_crc32_le(__le16 vid_le) +{ + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + u32 crc = ~0x0; + u32 temp = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= 0xedb88320; + } + + return crc; +} + +static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) +{ + u32 crc, hash = 0; + __le16 pmatch = 0; + int count = 0; + u16 vid = 0; + + for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { + __le16 vid_le = cpu_to_le16(vid); + crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; + hash |= (1 << crc); + count++; + } + + if (!priv->dma_cap.vlhash) { + if (count > 2) /* VID = 0 always passes filter */ + return -EOPNOTSUPP; + + pmatch = cpu_to_le16(vid); + hash = 0; + } + + return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); +} + +static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + bool is_double = false; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + if (be16_to_cpu(proto) == ETH_P_8021AD) + is_double = true; + + set_bit(vid, priv->active_vlans); + ret = stmmac_vlan_update(priv, is_double); + if (ret) { + clear_bit(vid, priv->active_vlans); + goto err_pm_put; + } + + if (priv->hw->num_vlan) { + ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); + if (ret) + goto err_pm_put; + } +err_pm_put: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + bool is_double = false; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + if (be16_to_cpu(proto) == ETH_P_8021AD) + is_double = true; + + clear_bit(vid, priv->active_vlans); + + if (priv->hw->num_vlan) { + ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, 
vid); + if (ret) + goto del_vlan_error; + } + + ret = stmmac_vlan_update(priv, is_double); + +del_vlan_error: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int i, nxmit = 0; + int queue; + + if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with slow path */ + txq_trans_cond_update(nq); + + for (i = 0; i < num_frames; i++) { + int res; + + res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); + if (res == STMMAC_XDP_CONSUMED) + break; + + nxmit++; + } + + if (flags & XDP_XMIT_FLUSH) { + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); + } + + __netif_tx_unlock(nq); + + return nxmit; +} + +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_rx_dma(priv, queue); + __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); +} + +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + u32 buf_size; + int ret; + + ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc RX desc.\n"); + return; + } + + ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); + if (ret) { + __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); + netdev_err(priv->dev, "Failed to init RX desc.\n"); + return; + } + + stmmac_reset_rx_queue(priv, queue); + stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, rx_q->queue_index); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, rx_q->queue_index); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_conf.dma_buf_sz, + rx_q->queue_index); + } + + stmmac_start_rx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); +} + +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + 
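+ /* Mask the TX DMA interrupt for this queue under the channel lock; TX DMA is stopped and its descriptors freed below. */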
stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_tx_dma(priv, queue); + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); +} + +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + int ret; + + ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc TX desc.\n"); + return; + } + + ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); + if (ret) { + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); + netdev_err(priv->dev, "Failed to init TX desc.\n"); + return; + } + + stmmac_reset_tx_queue(priv, queue); + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, tx_q->queue_index); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, tx_q->queue_index); + + stmmac_start_tx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); +} + +void stmmac_xdp_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; + + /* Ensure tx function is not running */ + netif_tx_disable(dev); + + /* Disable NAPI process */ + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + /* Free the IRQ lines */ + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); + + /* Stop TX/RX DMA channels */ + stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv, &priv->dma_conf); + + /* Disable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, false); + + /* set trans_start so we don't get spurious + * watchdogs during reset + */ + netif_trans_update(dev); + netif_carrier_off(dev); +} + +int stmmac_xdp_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_cnt, tx_cnt); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + u32 buf_size; + bool sph_en; + u32 chan; + int ret; + + ret = alloc_dma_desc_resources(priv, &priv->dma_conf); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto dma_desc_error; + } + + ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } + + stmmac_reset_queues_param(priv); + + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) { + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + } + + /* Adjust Split header */ + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_cnt; chan++) { + rx_q = &priv->dma_conf.rx_queue[chan]; + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->buf_alloc_num * + 
sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_conf.dma_buf_sz, + rx_q->queue_index); + } + + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } + + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_cnt; chan++) { + tx_q = &priv->dma_conf.tx_queue[chan]; + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); + + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx_q->txtimer.function = stmmac_tx_timer; + } + + /* Enable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, true); + + /* Start Rx & Tx DMA Channels */ + stmmac_start_all_dma(priv); + + ret = stmmac_request_irq(dev); + if (ret) + goto irq_error; + + /* Enable NAPI process*/ + stmmac_enable_all_queues(priv); + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + stmmac_enable_all_dma_irq(priv); + + return 0; + +irq_error: + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + stmmac_hw_teardown(dev); +init_error: + free_dma_desc_resources(priv, &priv->dma_conf); +dma_desc_error: + return ret; +} + +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + struct stmmac_channel *ch; + + if (test_bit(STMMAC_DOWN, &priv->state) || + !netif_carrier_ok(priv->dev)) + return -ENETDOWN; + + if (!stmmac_xdp_is_enabled(priv)) + return -EINVAL; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + rx_q = &priv->dma_conf.rx_queue[queue]; + tx_q = &priv->dma_conf.tx_queue[queue]; + ch = &priv->channel[queue]; + + if (!rx_q->xsk_pool && !tx_q->xsk_pool) + return -EINVAL; + + if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { + /* EQoS does not have per-DMA channel SW interrupt, + * so we schedule RX Napi straight-away. 
+ */ + if (likely(napi_schedule_prep(&ch->rxtx_napi))) + __napi_schedule(&ch->rxtx_napi); + } + + return 0; +} + +static const struct net_device_ops stmmac_netdev_ops = { + .ndo_open = stmmac_open, + .ndo_start_xmit = stmmac_xmit, + .ndo_stop = stmmac_release, + .ndo_change_mtu = stmmac_change_mtu, + .ndo_fix_features = stmmac_fix_features, + .ndo_set_features = stmmac_set_features, + .ndo_set_rx_mode = stmmac_set_rx_mode, + .ndo_tx_timeout = stmmac_tx_timeout, + .ndo_eth_ioctl = stmmac_ioctl, + .ndo_setup_tc = stmmac_setup_tc, + .ndo_select_queue = stmmac_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = stmmac_poll_controller, +#endif + .ndo_set_mac_address = stmmac_set_mac_address, + .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, + .ndo_bpf = stmmac_bpf, + .ndo_xdp_xmit = stmmac_xdp_xmit, + .ndo_xsk_wakeup = stmmac_xsk_wakeup, +}; + +static void stmmac_reset_subtask(struct stmmac_priv *priv) +{ + if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) + return; + if (test_bit(STMMAC_DOWN, &priv->state)) + return; + + netdev_err(priv->dev, "Reset adapter.\n"); + + rtnl_lock(); + netif_trans_update(priv->dev); + while (test_and_set_bit(STMMAC_RESETING, &priv->state)) + usleep_range(1000, 2000); + + set_bit(STMMAC_DOWN, &priv->state); + dev_close(priv->dev); + dev_open(priv->dev, NULL); + clear_bit(STMMAC_DOWN, &priv->state); + clear_bit(STMMAC_RESETING, &priv->state); + rtnl_unlock(); +} + +static void stmmac_service_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + service_task); + + stmmac_reset_subtask(priv); + clear_bit(STMMAC_SERVICE_SCHED, &priv->state); +} + +/** + * stmmac_hw_init - Init the MAC device + * @priv: driver private structure + * Description: this function is to configure the MAC device according to + * some platform parameters or the HW capability register. It prepares the + * driver to use either ring or chain modes and to setup either enhanced or + * normal descriptors. + */ +static int stmmac_hw_init(struct stmmac_priv *priv) +{ + int ret; + + /* dwmac-sun8i only work in chain mode */ + if (priv->plat->has_sun8i) + chain_mode = 1; + priv->chain_mode = chain_mode; + + /* Initialize HW Interface */ + ret = stmmac_hwif_init(priv); + if (ret) + return ret; + + /* Get the HW capability (new GMAC newer than 3.50a) */ + priv->hw_cap_support = stmmac_get_hw_features(priv); + if (priv->hw_cap_support) { + dev_info(priv->device, "DMA HW capability register supported\n"); + + /* We can override some gmac/dma configuration fields: e.g. + * enh_desc, tx_coe (e.g. that are passed through the + * platform) with the values from the HW capability + * register (if supported). + */ + priv->plat->enh_desc = priv->dma_cap.enh_desc; + priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && + !priv->plat->use_phy_wol; + priv->hw->pmt = priv->plat->pmt; + if (priv->dma_cap.hash_tb_sz) { + priv->hw->multicast_filter_bins = + (BIT(priv->dma_cap.hash_tb_sz) << 5); + priv->hw->mcast_bits_log2 = + ilog2(priv->hw->multicast_filter_bins); + } + + /* TXCOE doesn't work in thresh DMA mode */ + if (priv->plat->force_thresh_dma_mode) + priv->plat->tx_coe = 0; + else + priv->plat->tx_coe = priv->dma_cap.tx_coe; + + /* In case of GMAC4 rx_coe is from HW cap register. 
*/ + priv->plat->rx_coe = priv->dma_cap.rx_coe; + + if (priv->dma_cap.rx_coe_type2) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; + else if (priv->dma_cap.rx_coe_type1) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; + + } else { + dev_info(priv->device, "No HW DMA feature register supported\n"); + } + + if (priv->plat->rx_coe) { + priv->hw->rx_csum = priv->plat->rx_coe; + dev_info(priv->device, "RX Checksum Offload Engine supported\n"); + if (priv->synopsys_id < DWMAC_CORE_4_00) + dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); + } + if (priv->plat->tx_coe) + dev_info(priv->device, "TX Checksum insertion supported\n"); + + if (priv->plat->pmt) { + dev_info(priv->device, "Wake-Up On Lan supported\n"); + device_set_wakeup_capable(priv->device, 1); + } + + if (priv->dma_cap.tsoen) + dev_info(priv->device, "TSO supported\n"); + + priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; + priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; + + /* Run HW quirks, if any */ + if (priv->hwif_quirks) { + ret = priv->hwif_quirks(priv); + if (ret) + return ret; + } + + /* Rx Watchdog is available in the COREs newer than the 3.40. + * In some case, for example on bugged HW this feature + * has to be disable and this can be done by passing the + * riwt_off field from the platform. + */ + if (((priv->synopsys_id >= DWMAC_CORE_3_50) || + (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { + priv->use_riwt = 1; + dev_info(priv->device, + "Enable RX Mitigation via HW Watchdog Timer\n"); + } + + return 0; +} + +static void stmmac_napi_add(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 queue, maxq; + + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + ch->priv_data = priv; + ch->index = queue; + spin_lock_init(&ch->lock); + + if (queue < priv->plat->rx_queues_to_use) { + netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); + } + if (queue < priv->plat->tx_queues_to_use) { + netif_napi_add_tx(dev, &ch->tx_napi, + stmmac_napi_poll_tx); + } + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_add(dev, &ch->rxtx_napi, + stmmac_napi_poll_rxtx); + } + } +} + +static void stmmac_napi_del(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 queue, maxq; + + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); + + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + if (queue < priv->plat->rx_queues_to_use) + netif_napi_del(&ch->rx_napi); + if (queue < priv->plat->tx_queues_to_use) + netif_napi_del(&ch->tx_napi); + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_del(&ch->rxtx_napi); + } + } +} + +int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0, i; + + if (netif_running(dev)) + stmmac_release(dev); + + stmmac_napi_del(dev); + + priv->plat->rx_queues_to_use = rx_cnt; + priv->plat->tx_queues_to_use = tx_cnt; + if (!netif_is_rxfh_configured(dev)) + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = ethtool_rxfh_indir_default(i, + rx_cnt); + + stmmac_napi_add(dev); + + if (netif_running(dev)) + ret = stmmac_open(dev); + + return ret; +} + +int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) +{ + struct stmmac_priv *priv = netdev_priv(dev); 
+ int ret = 0; + + if (netif_running(dev)) + stmmac_release(dev); + + priv->dma_conf.dma_rx_size = rx_size; + priv->dma_conf.dma_tx_size = tx_size; + + if (netif_running(dev)) + ret = stmmac_open(dev); + + return ret; +} + +#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" +static void stmmac_fpe_lp_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + fpe_task); + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + bool *enable = &fpe_cfg->enable; + int retries = 20; + + while (retries-- > 0) { + /* Bail out immediately if FPE handshake is OFF */ + if (*lo_state == FPE_STATE_OFF || !*hs_enable) + break; + + if (*lo_state == FPE_STATE_ENTERING_ON && + *lp_state == FPE_STATE_ENTERING_ON) { + stmmac_fpe_configure(priv, priv->ioaddr, + fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + *enable); + + netdev_info(priv->dev, "configured FPE\n"); + + *lo_state = FPE_STATE_ON; + *lp_state = FPE_STATE_ON; + netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); + break; + } + + if ((*lo_state == FPE_STATE_CAPABLE || + *lo_state == FPE_STATE_ENTERING_ON) && + *lp_state != FPE_STATE_ON) { + netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, + *lo_state, *lp_state); + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, + MPACKET_VERIFY); + } + /* Sleep then retry */ + msleep(500); + } + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); +} + +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) +{ + if (priv->plat->fpe_cfg->hs_enable != enable) { + if (enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + priv->plat->fpe_cfg, + MPACKET_VERIFY); + } else { + priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; + priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; + } + + priv->plat->fpe_cfg->hs_enable = enable; + } +} + +/** + * stmmac_dvr_probe + * @device: device pointer + * @plat_dat: platform data pointer + * @res: stmmac resource pointer + * Description: this is the main probe function used to + * call the alloc_etherdev, allocate the priv structure. + * Return: + * returns 0 on success, otherwise errno. 
+ */ +int stmmac_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *res) +{ + struct net_device *ndev = NULL; + struct stmmac_priv *priv; + u32 rxq; + int i, ret = 0; + + ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), + MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, device); + + priv = netdev_priv(ndev); + priv->device = device; + priv->dev = ndev; + + stmmac_set_ethtool_ops(ndev); + priv->pause = pause; + priv->plat = plat_dat; + priv->ioaddr = res->addr; + priv->dev->base_addr = (unsigned long)res->addr; + priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; + + priv->dev->irq = res->irq; + priv->wol_irq = res->wol_irq; + priv->lpi_irq = res->lpi_irq; + priv->sfty_ce_irq = res->sfty_ce_irq; + priv->sfty_ue_irq = res->sfty_ue_irq; + for (i = 0; i < MTL_MAX_RX_QUEUES; i++) + priv->rx_irq[i] = res->rx_irq[i]; + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) + priv->tx_irq[i] = res->tx_irq[i]; + + if (!is_zero_ether_addr(res->mac)) + eth_hw_addr_set(priv->dev, res->mac); + + dev_set_drvdata(device, priv->dev); + + /* Verify driver arguments */ + stmmac_verify_args(); + + priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); + if (!priv->af_xdp_zc_qps) + return -ENOMEM; + + /* Allocate workqueue */ + priv->wq = create_singlethread_workqueue("stmmac_wq"); + if (!priv->wq) { + dev_err(priv->device, "failed to create workqueue\n"); + ret = -ENOMEM; + goto error_wq_init; + } + + INIT_WORK(&priv->service_task, stmmac_service_task); + + /* Initialize Link Partner FPE workqueue */ + INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); + + /* Override with kernel parameters if supplied XXX CRS XXX + * this needs to have multiple instances + */ + if ((phyaddr >= 0) && (phyaddr <= 31)) + priv->plat->phy_addr = phyaddr; + + if (priv->plat->stmmac_rst) { + ret = reset_control_assert(priv->plat->stmmac_rst); + reset_control_deassert(priv->plat->stmmac_rst); + /* Some reset controllers have only reset callback instead of + * assert + deassert callbacks pair. + */ + if (ret == -ENOTSUPP) + reset_control_reset(priv->plat->stmmac_rst); + } + + ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); + if (ret == -ENOTSUPP) + dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", + ERR_PTR(ret)); + + /* Wait a bit for the reset to take effect */ + udelay(10); + + /* Init MAC and get the capabilities */ + ret = stmmac_hw_init(priv); + if (ret) + goto error_hw_init; + + /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. + */ + if (priv->synopsys_id < DWMAC_CORE_5_20) + priv->plat->dma_cfg->dche = false; + + stmmac_check_ether_addr(priv); + + ndev->netdev_ops = &stmmac_netdev_ops; + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + + ret = stmmac_tc_init(priv, priv); + if (!ret) { + ndev->hw_features |= NETIF_F_HW_TC; + } + + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (priv->plat->has_gmac4) + ndev->hw_features |= NETIF_F_GSO_UDP_L4; + priv->tso = true; + dev_info(priv->device, "TSO feature enabled\n"); + } + + if (priv->dma_cap.sphen && !priv->plat->sph_disable) { + ndev->hw_features |= NETIF_F_GRO; + priv->sph_cap = true; + priv->sph = priv->sph_cap; + dev_info(priv->device, "SPH feature enabled\n"); + } + + /* Ideally our host DMA address width is the same as for the + * device. 
However, it may differ and then we have to use our + * host DMA width for allocation and the device DMA width for + * register handling. + */ + if (priv->plat->host_dma_width) + priv->dma_cap.host_dma_width = priv->plat->host_dma_width; + else + priv->dma_cap.host_dma_width = priv->dma_cap.addr64; + + if (priv->dma_cap.host_dma_width) { + ret = dma_set_mask_and_coherent(device, + DMA_BIT_MASK(priv->dma_cap.host_dma_width)); + if (!ret) { + dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", + priv->dma_cap.host_dma_width, priv->dma_cap.addr64); + + /* + * If more than 32 bits can be addressed, make sure to + * enable enhanced addressing mode. + */ + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + priv->plat->dma_cfg->eame = true; + } else { + ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); + if (ret) { + dev_err(priv->device, "Failed to set DMA Mask\n"); + goto error_hw_init; + } + + priv->dma_cap.host_dma_width = 32; + } + } + + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); +#ifdef STMMAC_VLAN_TAG_USED + /* Both mac100 and gmac support receive VLAN tag detection */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; + if (priv->dma_cap.vlhash) { + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + } + if (priv->dma_cap.vlins) { + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; + if (priv->dma_cap.dvlan) + ndev->features |= NETIF_F_HW_VLAN_STAG_TX; + } +#endif + priv->msg_enable = netif_msg_init(debug, default_msg_level); + + /* Initialize RSS */ + rxq = priv->plat->rx_queues_to_use; + netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); + + if (priv->dma_cap.rssen && priv->plat->rss_en) + ndev->features |= NETIF_F_RXHASH; + + /* MTU range: 46 - hw-specific max */ + ndev->min_mtu = ETH_ZLEN - ETH_HLEN; + if (priv->plat->has_xgmac) + ndev->max_mtu = XGMAC_JUMBO_LEN; + else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) + ndev->max_mtu = JUMBO_LEN; + else + ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); + /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu + * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. + */ + if ((priv->plat->maxmtu < ndev->max_mtu) && + (priv->plat->maxmtu >= ndev->min_mtu)) + ndev->max_mtu = priv->plat->maxmtu; + else if (priv->plat->maxmtu < ndev->min_mtu) + dev_warn(priv->device, + "%s: warning: maxmtu having invalid value (%d)\n", + __func__, priv->plat->maxmtu); + + if (flow_ctrl) + priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ + + /* Setup channels NAPI */ + stmmac_napi_add(ndev); + + mutex_init(&priv->lock); + + /* If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be + * changed at run-time and it is fixed. Viceversa the driver'll try to + * set the MDC clock dynamically according to the csr actual + * clock input. 
+ */ + if (priv->plat->clk_csr >= 0) + priv->clk_csr = priv->plat->clk_csr; + else + stmmac_clk_csr_set(priv); + + stmmac_check_pcs_mode(priv); + + pm_runtime_get_noresume(device); + pm_runtime_set_active(device); + if (!pm_runtime_enabled(device)) + pm_runtime_enable(device); + + if (priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) { + /* MDIO bus Registration */ + ret = stmmac_mdio_register(ndev); + if (ret < 0) { + dev_err_probe(priv->device, ret, + "%s: MDIO bus (id: %d) registration failed\n", + __func__, priv->plat->bus_id); + goto error_mdio_register; + } + } + + if (priv->plat->speed_mode_2500) + priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); + + if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { + ret = stmmac_xpcs_setup(priv->mii); + if (ret) + goto error_xpcs_setup; + } + + ret = stmmac_phy_setup(priv); + if (ret) { + netdev_err(ndev, "failed to setup phy (%d)\n", ret); + goto error_phy_setup; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(priv->device, "%s: ERROR %i registering the device\n", + __func__, ret); + goto error_netdev_register; + } + +#ifdef CONFIG_DEBUG_FS + stmmac_init_fs(ndev); +#endif + + if (priv->plat->dump_debug_regs) + priv->plat->dump_debug_regs(priv->plat->bsp_priv); + + /* Let pm_runtime_put() disable the clocks. + * If CONFIG_PM is not enabled, the clocks will stay powered. + */ + pm_runtime_put(device); + + return ret; + +error_netdev_register: + phylink_destroy(priv->phylink); +error_xpcs_setup: +error_phy_setup: + if (priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); +error_mdio_register: + stmmac_napi_del(ndev); +error_hw_init: + destroy_workqueue(priv->wq); +error_wq_init: + bitmap_free(priv->af_xdp_zc_qps); + + return ret; +} +EXPORT_SYMBOL_GPL(stmmac_dvr_probe); + +/** + * stmmac_dvr_remove + * @dev: device pointer + * Description: this function resets the TX/RX processes, disables the MAC RX/TX + * changes the link status, releases the DMA descriptor rings. + */ +int stmmac_dvr_remove(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + netdev_info(priv->dev, "%s: removing driver", __func__); + + pm_runtime_get_sync(dev); + + stmmac_stop_all_dma(priv); + stmmac_mac_set(priv, priv->ioaddr, false); + netif_carrier_off(ndev); + unregister_netdev(ndev); + +#ifdef CONFIG_DEBUG_FS + stmmac_exit_fs(ndev); +#endif + phylink_destroy(priv->phylink); + if (priv->plat->stmmac_rst) + reset_control_assert(priv->plat->stmmac_rst); + reset_control_assert(priv->plat->stmmac_ahb_rst); + if (priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); + destroy_workqueue(priv->wq); + mutex_destroy(&priv->lock); + bitmap_free(priv->af_xdp_zc_qps); + + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_dvr_remove); + +/** + * stmmac_suspend - suspend callback + * @dev: device pointer + * Description: this is the function to suspend the device and it is called + * by the platform driver to stop the network queue, release the resources, + * program the PMT register (for WoL), clean and release driver resources. 
+ */ +int stmmac_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + u32 chan; + + if (!ndev || !netif_running(ndev)) + return 0; + + mutex_lock(&priv->lock); + + netif_device_detach(ndev); + + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); + + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + + /* Stop TX/RX DMA */ + stmmac_stop_all_dma(priv); + + if (priv->plat->serdes_powerdown) + priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); + + /* Enable Power down mode by programming the PMT regs */ + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + stmmac_pmt(priv, priv->hw, priv->wolopts); + priv->irq_wake = 1; + } else { + stmmac_mac_set(priv, priv->ioaddr, false); + pinctrl_pm_select_sleep_state(priv->device); + } + + mutex_unlock(&priv->lock); + + rtnl_lock(); + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + phylink_suspend(priv->phylink, true); + } else { + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); + phylink_suspend(priv->phylink, false); + } + rtnl_unlock(); + + if (priv->dma_cap.fpesel) { + /* Disable FPE */ + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, false); + + stmmac_fpe_handshake(priv, false); + stmmac_fpe_stop_wq(priv); + } + + priv->speed = SPEED_UNKNOWN; + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_suspend); + +static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + + rx_q->cur_rx = 0; + rx_q->dirty_rx = 0; +} + +static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + + tx_q->cur_tx = 0; + tx_q->dirty_tx = 0; + tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); +} + +/** + * stmmac_reset_queues_param - reset queue parameters + * @priv: device pointer + */ +static void stmmac_reset_queues_param(struct stmmac_priv *priv) +{ + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < rx_cnt; queue++) + stmmac_reset_rx_queue(priv, queue); + + for (queue = 0; queue < tx_cnt; queue++) + stmmac_reset_tx_queue(priv, queue); +} + +/** + * stmmac_resume - resume callback + * @dev: device pointer + * Description: when resume this function is invoked to setup the DMA and CORE + * in a usable state. + */ +int stmmac_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + /* Power Down bit, into the PM register, is cleared + * automatically as soon as a magic packet or a Wake-up frame + * is received. Anyway, it's better to manually clear + * this bit because it can generate problems while resuming + * from another devices (e.g. serial console). 
+ */ + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + mutex_lock(&priv->lock); + stmmac_pmt(priv, priv->hw, 0); + mutex_unlock(&priv->lock); + priv->irq_wake = 0; + } else { + pinctrl_pm_select_default_state(priv->device); + /* reset the phy so that it's ready */ + if (priv->mii) + stmmac_mdio_reset(priv->mii); + } + + if (priv->plat->serdes_powerup) { + ret = priv->plat->serdes_powerup(ndev, + priv->plat->bsp_priv); + + if (ret < 0) + return ret; + } + + rtnl_lock(); + if (device_may_wakeup(priv->device) && priv->plat->pmt) { + phylink_resume(priv->phylink); + } else { + phylink_resume(priv->phylink); + if (device_may_wakeup(priv->device)) + phylink_speed_up(priv->phylink); + } + rtnl_unlock(); + + rtnl_lock(); + mutex_lock(&priv->lock); + + stmmac_reset_queues_param(priv); + + stmmac_free_tx_skbufs(priv); + stmmac_clear_descriptors(priv, &priv->dma_conf); + + stmmac_hw_setup(ndev, false); + stmmac_init_coalesce(priv); + stmmac_set_rx_mode(ndev); + + stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); + + stmmac_enable_all_queues(priv); + stmmac_enable_all_dma_irq(priv); + + mutex_unlock(&priv->lock); + rtnl_unlock(); + + netif_device_attach(ndev); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_resume); + +#ifndef MODULE +static int __init stmmac_cmdline_opt(char *str) +{ + char *opt; + + if (!str || !*str) + return 1; + while ((opt = strsep(&str, ",")) != NULL) { + if (!strncmp(opt, "debug:", 6)) { + if (kstrtoint(opt + 6, 0, &debug)) + goto err; + } else if (!strncmp(opt, "phyaddr:", 8)) { + if (kstrtoint(opt + 8, 0, &phyaddr)) + goto err; + } else if (!strncmp(opt, "buf_sz:", 7)) { + if (kstrtoint(opt + 7, 0, &buf_sz)) + goto err; + } else if (!strncmp(opt, "tc:", 3)) { + if (kstrtoint(opt + 3, 0, &tc)) + goto err; + } else if (!strncmp(opt, "watchdog:", 9)) { + if (kstrtoint(opt + 9, 0, &watchdog)) + goto err; + } else if (!strncmp(opt, "flow_ctrl:", 10)) { + if (kstrtoint(opt + 10, 0, &flow_ctrl)) + goto err; + } else if (!strncmp(opt, "pause:", 6)) { + if (kstrtoint(opt + 6, 0, &pause)) + goto err; + } else if (!strncmp(opt, "eee_timer:", 10)) { + if (kstrtoint(opt + 10, 0, &eee_timer)) + goto err; + } else if (!strncmp(opt, "chain_mode:", 11)) { + if (kstrtoint(opt + 11, 0, &chain_mode)) + goto err; + } + } + return 1; + +err: + pr_err("%s: ERROR broken module parameter conversion", __func__); + return 1; +} + +__setup("stmmaceth=", stmmac_cmdline_opt); +#endif /* MODULE */ + +static int __init stmmac_init(void) +{ +#ifdef CONFIG_DEBUG_FS + /* Create debugfs main directory if it doesn't exist yet */ + if (!stmmac_fs_dir) + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + register_netdevice_notifier(&stmmac_notifier); +#endif + + return 0; +} + +static void __exit stmmac_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + unregister_netdevice_notifier(&stmmac_notifier); + debugfs_remove_recursive(stmmac_fs_dir); +#endif +} + +module_init(stmmac_init) +module_exit(stmmac_exit) + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); +MODULE_AUTHOR("Giuseppe Cavallaro "); +MODULE_LICENSE("GPL"); diff --git a/devices/stmmac/stmmac_mdio-6.1-ethercat.c b/devices/stmmac/stmmac_mdio-6.1-ethercat.c new file mode 100644 index 00000000..77e7d33c --- /dev/null +++ b/devices/stmmac/stmmac_mdio-6.1-ethercat.c @@ -0,0 +1,584 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + STMMAC Ethernet Driver -- MDIO bus implementation + Provides Bus interface for MII registers + + Copyright (C) 2007-2009 
STMicroelectronics Ltd + + + Author: Carl Shaw + Maintainer: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dwxgmac2-6.1-ethercat.h" +#include "stmmac-6.1-ethercat.h" + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 +#define MII_DATA_MASK GENMASK(15, 0) + +/* GMAC4 defines */ +#define MII_GMAC4_GOC_SHIFT 2 +#define MII_GMAC4_REG_ADDR_SHIFT 16 +#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) +#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) +#define MII_GMAC4_C45E BIT(1) + +/* XGMAC defines */ +#define MII_XGMAC_SADDR BIT(18) +#define MII_XGMAC_CMD_SHIFT 16 +#define MII_XGMAC_WRITE (1 << MII_XGMAC_CMD_SHIFT) +#define MII_XGMAC_READ (3 << MII_XGMAC_CMD_SHIFT) +#define MII_XGMAC_BUSY BIT(22) +#define MII_XGMAC_MAX_C22ADDR 3 +#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0) +#define MII_XGMAC_PA_SHIFT 16 +#define MII_XGMAC_DA_SHIFT 21 + +static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 *hw_addr) +{ + u32 tmp; + + /* Set port as Clause 45 */ + tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); + tmp &= ~BIT(phyaddr); + writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); + + *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff); + *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT; + return 0; +} + +static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 *hw_addr) +{ + u32 tmp; + + /* HW does not support C22 addr >= 4 */ + if (phyaddr > MII_XGMAC_MAX_C22ADDR) + return -ENODEV; + + /* Set port as Clause 22 */ + tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); + tmp &= ~MII_XGMAC_C22P_MASK; + tmp |= BIT(phyaddr); + writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); + + *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f); + return 0; +} + +static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 tmp, addr, value = MII_XGMAC_BUSY; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 30000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + if (phyreg & MII_ADDR_C45) { + phyreg &= ~MII_ADDR_C45; + + ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + } else { + ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + + value |= MII_XGMAC_SADDR; + } + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + value |= MII_XGMAC_READ; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 30000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Set the MII address register to read */ + writel(addr, priv->ioaddr + mii_address); + writel(value, priv->ioaddr + mii_data); + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 30000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Read the data from the MII data register */ 
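+ /* (the XGMAC data register carries the result in its low 16 bits,
+  * hence the GENMASK(15, 0) mask on the read below) */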
+ ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, + int phyreg, u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 addr, tmp, value = MII_XGMAC_BUSY; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 30000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + if (phyreg & MII_ADDR_C45) { + phyreg &= ~MII_ADDR_C45; + + ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + } else { + ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + + value |= MII_XGMAC_SADDR; + } + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + value |= phydata; + value |= MII_XGMAC_WRITE; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 30000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Set the MII address register to write */ + writel(addr, priv->ioaddr + mii_address); + writel(value, priv->ioaddr + mii_data); + + /* Wait until any existing MII operation is complete */ + ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 30000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; +} + +/** + * stmmac_mdio_read + * @bus: points to the mii_bus structure + * @phyaddr: MII addr + * @phyreg: MII reg + * Description: it reads data from the MII register from within the phy device. + * For the 7111 GMAC, we must set the bit 0 in the MII address register while + * accessing the PHY registers. + * Fortunately, it seems this has no drawback for the 7109 MAC. 
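+ *
+ * The access sequence: wait for MII_BUSY to clear, write the Clause 45
+ * register address into the data register where applicable, program the
+ * address register with the PHY address, register, CSR clock selection
+ * (and MII_GMAC4_READ on GMAC4), wait for MII_BUSY to clear again and
+ * return the low 16 bits of the data register.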
+ */ +static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 value = MII_BUSY; + int data = 0; + u32 v; + + data = pm_runtime_resume_and_get(priv->device); + if (data < 0) + return data; + + value |= (phyaddr << priv->hw->mii.addr_shift) + & priv->hw->mii.addr_mask; + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + if (priv->plat->has_gmac4) { + value |= MII_GMAC4_READ; + if (phyreg & MII_ADDR_C45) { + value |= MII_GMAC4_C45E; + value &= ~priv->hw->mii.reg_mask; + value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << + priv->hw->mii.reg_shift) & + priv->hw->mii.reg_mask; + + data |= (phyreg & MII_REGADDR_C45_MASK) << + MII_GMAC4_REG_ADDR_SHIFT; + } + } + + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 30000)) { + data = -EBUSY; + goto err_disable_clks; + } + + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 30000)) { + data = -EBUSY; + goto err_disable_clks; + } + + /* Read the data from the MII data register */ + data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK; + +err_disable_clks: + pm_runtime_put(priv->device); + + return data; +} + +/** + * stmmac_mdio_write + * @bus: points to the mii_bus structure + * @phyaddr: MII addr + * @phyreg: MII reg + * @phydata: phy data + * Description: it writes the data into the MII register from within the device. + */ +static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + int ret, data = phydata; + u32 value = MII_BUSY; + u32 v; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + value |= (phyaddr << priv->hw->mii.addr_shift) + & priv->hw->mii.addr_mask; + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + if (priv->plat->has_gmac4) { + value |= MII_GMAC4_WRITE; + if (phyreg & MII_ADDR_C45) { + value |= MII_GMAC4_C45E; + value &= ~priv->hw->mii.reg_mask; + value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << + priv->hw->mii.reg_shift) & + priv->hw->mii.reg_mask; + + data |= (phyreg & MII_REGADDR_C45_MASK) << + MII_GMAC4_REG_ADDR_SHIFT; + } + } else { + value |= MII_WRITE; + } + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 30000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Set the MII address register to write */ + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 30000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; +} + +/** + * stmmac_mdio_reset + * @bus: points to the mii_bus structure + * Description: reset the MII bus + */ +int stmmac_mdio_reset(struct mii_bus *bus) +{ +#if IS_ENABLED(CONFIG_STMMAC_PLATFORM) + struct 
net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + +#ifdef CONFIG_OF + if (priv->device->of_node) { + struct gpio_desc *reset_gpio; + u32 delays[3] = { 0, 0, 0 }; + + reset_gpio = devm_gpiod_get_optional(priv->device, + "snps,reset", + GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) + return PTR_ERR(reset_gpio); + + device_property_read_u32_array(priv->device, + "snps,reset-delays-us", + delays, ARRAY_SIZE(delays)); + + if (delays[0]) + msleep(DIV_ROUND_UP(delays[0], 1000)); + + gpiod_set_value_cansleep(reset_gpio, 1); + if (delays[1]) + msleep(DIV_ROUND_UP(delays[1], 1000)); + + gpiod_set_value_cansleep(reset_gpio, 0); + if (delays[2]) + msleep(DIV_ROUND_UP(delays[2], 1000)); + } +#endif + + /* This is a workaround for problems with the STE101P PHY. + * It doesn't complete its reset until at least one clock cycle + * on MDC, so perform a dummy mdio read. To be updated for GMAC4 + * if needed. + */ + if (!priv->plat->has_gmac4) + writel(0, priv->ioaddr + mii_address); +#endif + return 0; +} + +int stmmac_xpcs_setup(struct mii_bus *bus) +{ + struct net_device *ndev = bus->priv; + struct mdio_device *mdiodev; + struct stmmac_priv *priv; + struct dw_xpcs *xpcs; + int mode, addr; + + priv = netdev_priv(ndev); + mode = priv->plat->phy_interface; + + /* Try to probe the XPCS by scanning all addresses. */ + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + mdiodev = mdio_device_create(bus, addr); + if (IS_ERR(mdiodev)) + continue; + + xpcs = xpcs_create(mdiodev, mode); + if (IS_ERR_OR_NULL(xpcs)) { + mdio_device_free(mdiodev); + continue; + } + + priv->hw->xpcs = xpcs; + break; + } + + if (!priv->hw->xpcs) { + dev_warn(priv->device, "No xPCS found\n"); + return -ENODEV; + } + + return 0; +} + +/** + * stmmac_mdio_register + * @ndev: net device structure + * Description: it registers the MII bus + */ +int stmmac_mdio_register(struct net_device *ndev) +{ + int err = 0; + struct mii_bus *new_bus; + struct stmmac_priv *priv = netdev_priv(ndev); + struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); + struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; + struct device_node *mdio_node = priv->plat->mdio_node; + struct device *dev = ndev->dev.parent; + struct fwnode_handle *fixed_node; + int addr, found, max_addr; + + if (!mdio_bus_data) + return 0; + + new_bus = mdiobus_alloc(); + if (!new_bus) + return -ENOMEM; + + if (mdio_bus_data->irqs) + memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); + + new_bus->name = "ec_stmmac"; + + if (priv->plat->has_gmac4) + new_bus->probe_capabilities = MDIOBUS_C22_C45; + + if (priv->plat->has_xgmac) { + new_bus->read = &stmmac_xgmac2_mdio_read; + new_bus->write = &stmmac_xgmac2_mdio_write; + + /* Right now only C22 phys are supported */ + max_addr = MII_XGMAC_MAX_C22ADDR + 1; + + /* Check if DT specified an unsupported phy addr */ + if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR) + dev_err(dev, "Unsupported phy_addr (max=%d)\n", + MII_XGMAC_MAX_C22ADDR); + } else { + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + max_addr = PHY_MAX_ADDR; + } + + if (mdio_bus_data->needs_reset) + new_bus->reset = &stmmac_mdio_reset; + + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", + new_bus->name, priv->plat->bus_id); + new_bus->priv = ndev; + new_bus->phy_mask = mdio_bus_data->phy_mask; + new_bus->parent = priv->device; + + err = of_mdiobus_register(new_bus, mdio_node); + if (err == -ENODEV) { + err = 0; + dev_info(dev, "MDIO 
bus is disabled\n"); + goto bus_register_fail; + } else if (err) { + dev_err_probe(dev, err, "Cannot register the MDIO bus\n"); + goto bus_register_fail; + } + + /* Looks like we need a dummy read for XGMAC only and C45 PHYs */ + if (priv->plat->has_xgmac) + stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45); + + /* If fixed-link is set, skip PHY scanning */ + if (!fwnode) + fwnode = dev_fwnode(priv->device); + + if (fwnode) { + fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); + if (fixed_node) { + fwnode_handle_put(fixed_node); + goto bus_register_done; + } + } + + if (priv->plat->phy_node || mdio_node) + goto bus_register_done; + + found = 0; + for (addr = 0; addr < max_addr; addr++) { + struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); + + if (!phydev) + continue; + + /* + * If an IRQ was provided to be assigned after + * the bus probe, do it here. + */ + if (!mdio_bus_data->irqs && + (mdio_bus_data->probed_phy_irq > 0)) { + new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; + phydev->irq = mdio_bus_data->probed_phy_irq; + } + + /* + * If we're going to bind the MAC to this PHY bus, + * and no PHY number was provided to the MAC, + * use the one probed here. + */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = addr; + + phy_attached_info(phydev); + found = 1; + } + + if (!found && !mdio_node) { + dev_warn(dev, "No PHY found\n"); + err = -ENODEV; + goto no_phy_found; + } + +bus_register_done: + priv->mii = new_bus; + + return 0; + +no_phy_found: + mdiobus_unregister(new_bus); +bus_register_fail: + mdiobus_free(new_bus); + return err; +} + +/** + * stmmac_mdio_unregister + * @ndev: net device structure + * Description: it unregisters the MII bus + */ +int stmmac_mdio_unregister(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + if (!priv->mii) + return 0; + + if (priv->hw->xpcs) { + mdio_device_free(priv->hw->xpcs->mdiodev); + xpcs_destroy(priv->hw->xpcs); + } + + mdiobus_unregister(priv->mii); + priv->mii->priv = NULL; + mdiobus_free(priv->mii); + priv->mii = NULL; + + return 0; +} diff --git a/devices/stmmac/stmmac_mdio-6.1-orig.c b/devices/stmmac/stmmac_mdio-6.1-orig.c new file mode 100644 index 00000000..379fc887 --- /dev/null +++ b/devices/stmmac/stmmac_mdio-6.1-orig.c @@ -0,0 +1,584 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + STMMAC Ethernet Driver -- MDIO bus implementation + Provides Bus interface for MII registers + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + + Author: Carl Shaw + Maintainer: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dwxgmac2.h" +#include "stmmac.h" + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 +#define MII_DATA_MASK GENMASK(15, 0) + +/* GMAC4 defines */ +#define MII_GMAC4_GOC_SHIFT 2 +#define MII_GMAC4_REG_ADDR_SHIFT 16 +#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) +#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) +#define MII_GMAC4_C45E BIT(1) + +/* XGMAC defines */ +#define MII_XGMAC_SADDR BIT(18) +#define MII_XGMAC_CMD_SHIFT 16 +#define MII_XGMAC_WRITE (1 << MII_XGMAC_CMD_SHIFT) +#define MII_XGMAC_READ (3 << MII_XGMAC_CMD_SHIFT) +#define MII_XGMAC_BUSY BIT(22) +#define MII_XGMAC_MAX_C22ADDR 3 +#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0) +#define MII_XGMAC_PA_SHIFT 16 +#define MII_XGMAC_DA_SHIFT 21 + 
+static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 *hw_addr) +{ + u32 tmp; + + /* Set port as Clause 45 */ + tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); + tmp &= ~BIT(phyaddr); + writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); + + *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff); + *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT; + return 0; +} + +static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 *hw_addr) +{ + u32 tmp; + + /* HW does not support C22 addr >= 4 */ + if (phyaddr > MII_XGMAC_MAX_C22ADDR) + return -ENODEV; + + /* Set port as Clause 22 */ + tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); + tmp &= ~MII_XGMAC_C22P_MASK; + tmp |= BIT(phyaddr); + writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); + + *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f); + return 0; +} + +static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 tmp, addr, value = MII_XGMAC_BUSY; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + if (phyreg & MII_ADDR_C45) { + phyreg &= ~MII_ADDR_C45; + + ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + } else { + ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + + value |= MII_XGMAC_SADDR; + } + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + value |= MII_XGMAC_READ; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Set the MII address register to read */ + writel(addr, priv->ioaddr + mii_address); + writel(value, priv->ioaddr + mii_data); + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Read the data from the MII data register */ + ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, + int phyreg, u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 addr, tmp, value = MII_XGMAC_BUSY; + int ret; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + if (phyreg & MII_ADDR_C45) { + phyreg &= ~MII_ADDR_C45; + + ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + } else { + ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + if (ret) + goto err_disable_clks; + + 
value |= MII_XGMAC_SADDR; + } + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + value |= phydata; + value |= MII_XGMAC_WRITE; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Set the MII address register to write */ + writel(addr, priv->ioaddr + mii_address); + writel(value, priv->ioaddr + mii_data); + + /* Wait until any existing MII operation is complete */ + ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; +} + +/** + * stmmac_mdio_read + * @bus: points to the mii_bus structure + * @phyaddr: MII addr + * @phyreg: MII reg + * Description: it reads data from the MII register from within the phy device. + * For the 7111 GMAC, we must set the bit 0 in the MII address register while + * accessing the PHY registers. + * Fortunately, it seems this has no drawback for the 7109 MAC. + */ +static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 value = MII_BUSY; + int data = 0; + u32 v; + + data = pm_runtime_resume_and_get(priv->device); + if (data < 0) + return data; + + value |= (phyaddr << priv->hw->mii.addr_shift) + & priv->hw->mii.addr_mask; + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + if (priv->plat->has_gmac4) { + value |= MII_GMAC4_READ; + if (phyreg & MII_ADDR_C45) { + value |= MII_GMAC4_C45E; + value &= ~priv->hw->mii.reg_mask; + value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << + priv->hw->mii.reg_shift) & + priv->hw->mii.reg_mask; + + data |= (phyreg & MII_REGADDR_C45_MASK) << + MII_GMAC4_REG_ADDR_SHIFT; + } + } + + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } + + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } + + /* Read the data from the MII data register */ + data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK; + +err_disable_clks: + pm_runtime_put(priv->device); + + return data; +} + +/** + * stmmac_mdio_write + * @bus: points to the mii_bus structure + * @phyaddr: MII addr + * @phyreg: MII reg + * @phydata: phy data + * Description: it writes the data into the MII register from within the device. 
+ */ +static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + int ret, data = phydata; + u32 value = MII_BUSY; + u32 v; + + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) + return ret; + + value |= (phyaddr << priv->hw->mii.addr_shift) + & priv->hw->mii.addr_mask; + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; + + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + if (priv->plat->has_gmac4) { + value |= MII_GMAC4_WRITE; + if (phyreg & MII_ADDR_C45) { + value |= MII_GMAC4_C45E; + value &= ~priv->hw->mii.reg_mask; + value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << + priv->hw->mii.reg_shift) & + priv->hw->mii.reg_mask; + + data |= (phyreg & MII_REGADDR_C45_MASK) << + MII_GMAC4_REG_ADDR_SHIFT; + } + } else { + value |= MII_WRITE; + } + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } + + /* Set the MII address register to write */ + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; +} + +/** + * stmmac_mdio_reset + * @bus: points to the mii_bus structure + * Description: reset the MII bus + */ +int stmmac_mdio_reset(struct mii_bus *bus) +{ +#if IS_ENABLED(CONFIG_STMMAC_PLATFORM) + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + +#ifdef CONFIG_OF + if (priv->device->of_node) { + struct gpio_desc *reset_gpio; + u32 delays[3] = { 0, 0, 0 }; + + reset_gpio = devm_gpiod_get_optional(priv->device, + "snps,reset", + GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) + return PTR_ERR(reset_gpio); + + device_property_read_u32_array(priv->device, + "snps,reset-delays-us", + delays, ARRAY_SIZE(delays)); + + if (delays[0]) + msleep(DIV_ROUND_UP(delays[0], 1000)); + + gpiod_set_value_cansleep(reset_gpio, 1); + if (delays[1]) + msleep(DIV_ROUND_UP(delays[1], 1000)); + + gpiod_set_value_cansleep(reset_gpio, 0); + if (delays[2]) + msleep(DIV_ROUND_UP(delays[2], 1000)); + } +#endif + + /* This is a workaround for problems with the STE101P PHY. + * It doesn't complete its reset until at least one clock cycle + * on MDC, so perform a dummy mdio read. To be updated for GMAC4 + * if needed. + */ + if (!priv->plat->has_gmac4) + writel(0, priv->ioaddr + mii_address); +#endif + return 0; +} + +int stmmac_xpcs_setup(struct mii_bus *bus) +{ + struct net_device *ndev = bus->priv; + struct mdio_device *mdiodev; + struct stmmac_priv *priv; + struct dw_xpcs *xpcs; + int mode, addr; + + priv = netdev_priv(ndev); + mode = priv->plat->phy_interface; + + /* Try to probe the XPCS by scanning all addresses. 
*/ + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + mdiodev = mdio_device_create(bus, addr); + if (IS_ERR(mdiodev)) + continue; + + xpcs = xpcs_create(mdiodev, mode); + if (IS_ERR_OR_NULL(xpcs)) { + mdio_device_free(mdiodev); + continue; + } + + priv->hw->xpcs = xpcs; + break; + } + + if (!priv->hw->xpcs) { + dev_warn(priv->device, "No xPCS found\n"); + return -ENODEV; + } + + return 0; +} + +/** + * stmmac_mdio_register + * @ndev: net device structure + * Description: it registers the MII bus + */ +int stmmac_mdio_register(struct net_device *ndev) +{ + int err = 0; + struct mii_bus *new_bus; + struct stmmac_priv *priv = netdev_priv(ndev); + struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); + struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; + struct device_node *mdio_node = priv->plat->mdio_node; + struct device *dev = ndev->dev.parent; + struct fwnode_handle *fixed_node; + int addr, found, max_addr; + + if (!mdio_bus_data) + return 0; + + new_bus = mdiobus_alloc(); + if (!new_bus) + return -ENOMEM; + + if (mdio_bus_data->irqs) + memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); + + new_bus->name = "stmmac"; + + if (priv->plat->has_gmac4) + new_bus->probe_capabilities = MDIOBUS_C22_C45; + + if (priv->plat->has_xgmac) { + new_bus->read = &stmmac_xgmac2_mdio_read; + new_bus->write = &stmmac_xgmac2_mdio_write; + + /* Right now only C22 phys are supported */ + max_addr = MII_XGMAC_MAX_C22ADDR + 1; + + /* Check if DT specified an unsupported phy addr */ + if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR) + dev_err(dev, "Unsupported phy_addr (max=%d)\n", + MII_XGMAC_MAX_C22ADDR); + } else { + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + max_addr = PHY_MAX_ADDR; + } + + if (mdio_bus_data->needs_reset) + new_bus->reset = &stmmac_mdio_reset; + + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", + new_bus->name, priv->plat->bus_id); + new_bus->priv = ndev; + new_bus->phy_mask = mdio_bus_data->phy_mask; + new_bus->parent = priv->device; + + err = of_mdiobus_register(new_bus, mdio_node); + if (err == -ENODEV) { + err = 0; + dev_info(dev, "MDIO bus is disabled\n"); + goto bus_register_fail; + } else if (err) { + dev_err_probe(dev, err, "Cannot register the MDIO bus\n"); + goto bus_register_fail; + } + + /* Looks like we need a dummy read for XGMAC only and C45 PHYs */ + if (priv->plat->has_xgmac) + stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45); + + /* If fixed-link is set, skip PHY scanning */ + if (!fwnode) + fwnode = dev_fwnode(priv->device); + + if (fwnode) { + fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); + if (fixed_node) { + fwnode_handle_put(fixed_node); + goto bus_register_done; + } + } + + if (priv->plat->phy_node || mdio_node) + goto bus_register_done; + + found = 0; + for (addr = 0; addr < max_addr; addr++) { + struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); + + if (!phydev) + continue; + + /* + * If an IRQ was provided to be assigned after + * the bus probe, do it here. + */ + if (!mdio_bus_data->irqs && + (mdio_bus_data->probed_phy_irq > 0)) { + new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; + phydev->irq = mdio_bus_data->probed_phy_irq; + } + + /* + * If we're going to bind the MAC to this PHY bus, + * and no PHY number was provided to the MAC, + * use the one probed here. 
+ */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = addr; + + phy_attached_info(phydev); + found = 1; + } + + if (!found && !mdio_node) { + dev_warn(dev, "No PHY found\n"); + err = -ENODEV; + goto no_phy_found; + } + +bus_register_done: + priv->mii = new_bus; + + return 0; + +no_phy_found: + mdiobus_unregister(new_bus); +bus_register_fail: + mdiobus_free(new_bus); + return err; +} + +/** + * stmmac_mdio_unregister + * @ndev: net device structure + * Description: it unregisters the MII bus + */ +int stmmac_mdio_unregister(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + if (!priv->mii) + return 0; + + if (priv->hw->xpcs) { + mdio_device_free(priv->hw->xpcs->mdiodev); + xpcs_destroy(priv->hw->xpcs); + } + + mdiobus_unregister(priv->mii); + priv->mii->priv = NULL; + mdiobus_free(priv->mii); + priv->mii = NULL; + + return 0; +} diff --git a/devices/stmmac/stmmac_pci-6.1-ethercat.c b/devices/stmmac/stmmac_pci-6.1-ethercat.c new file mode 100644 index 00000000..6e437f54 --- /dev/null +++ b/devices/stmmac/stmmac_pci-6.1-ethercat.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This contains the functions to handle the pci driver. + + Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd + + + Author: Rayagond Kokatanur + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include + +#include "stmmac-6.1-ethercat.h" + +struct stmmac_pci_info { + int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); +}; + +static void common_default_data(struct plat_stmmacenet_data *plat) +{ + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; + + plat->mdio_bus_data->needs_reset = true; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[0].use_prio = false; + plat->rx_queues_cfg[0].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[0].pkt_route = 0x0; +} + +static int stmmac_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + /* Set common default data first */ + common_default_data(plat); + + plat->bus_id = 1; + plat->phy_addr = 0; + plat->phy_interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + /* TODO: AXI */ + + return 0; +} + +static const struct stmmac_pci_info stmmac_pci_info = { + .setup = stmmac_default_data, +}; + +static int snps_gmac5_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int i; + + plat->clk_csr = 5; + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 1; + plat->tso_en = 1; + plat->pmt = 1; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 4; + plat->rx_queues_to_use = 4; + 
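+ /* Defaults for the Synopsys GMAC5 PCI part: weighted round-robin
+  * scheduling with equal weights across the four TX queues, strict
+  * priority on RX, and TBS enabled on every TX queue except queue 0. */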
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; + for (i = 0; i < plat->tx_queues_to_use; i++) { + plat->tx_queues_cfg[i].use_prio = false; + plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->tx_queues_cfg[i].weight = 25; + if (i > 0) + plat->tx_queues_cfg[i].tbs_en = 1; + } + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + for (i = 0; i < plat->rx_queues_to_use; i++) { + plat->rx_queues_cfg[i].use_prio = false; + plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->rx_queues_cfg[i].pkt_route = 0x0; + plat->rx_queues_cfg[i].chan = i; + } + + plat->bus_id = 1; + plat->phy_addr = -1; + plat->phy_interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + + /* Axi Configuration */ + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->axi->axi_wr_osr_lmt = 31; + plat->axi->axi_rd_osr_lmt = 31; + + plat->axi->axi_fb = false; + plat->axi->axi_blen[0] = 4; + plat->axi->axi_blen[1] = 8; + plat->axi->axi_blen[2] = 16; + plat->axi->axi_blen[3] = 32; + + return 0; +} + +static const struct stmmac_pci_info snps_gmac5_pci_info = { + .setup = snps_gmac5_default_data, +}; + +/** + * stmmac_pci_probe + * + * @pdev: pci device pointer + * @id: pointer to table of device id/id's. + * + * Description: This probing function gets called for all PCI devices which + * match the ID table and are not "owned" by other driver yet. This function + * gets passed a "struct pci_dev *" for each device whose entry in the ID table + * matches the device. The probe functions returns zero when the driver choose + * to take "ownership" of the device or an error code(-ve no) otherwise. + */ +static int stmmac_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data; + struct plat_stmmacenet_data *plat; + struct stmmac_resources res; + int i; + int ret; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; + + plat->safety_feat_cfg = devm_kzalloc(&pdev->dev, + sizeof(*plat->safety_feat_cfg), + GFP_KERNEL); + if (!plat->safety_feat_cfg) + return -ENOMEM; + + /* Enable pci device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", + __func__); + return ret; + } + + /* Get the base address of device */ + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); + if (ret) + return ret; + break; + } + + pci_set_master(pdev); + + ret = info->setup(pdev, plat); + if (ret) + return ret; + + memset(&res, 0, sizeof(res)); + res.addr = pcim_iomap_table(pdev)[i]; + res.wol_irq = pdev->irq; + res.irq = pdev->irq; + + plat->safety_feat_cfg->tsoee = 1; + plat->safety_feat_cfg->mrxpee = 1; + plat->safety_feat_cfg->mestee = 1; + plat->safety_feat_cfg->mrxee = 1; + plat->safety_feat_cfg->mtxee = 1; + plat->safety_feat_cfg->epsi = 1; + plat->safety_feat_cfg->edpp = 1; + plat->safety_feat_cfg->prtyen = 1; + plat->safety_feat_cfg->tmouten = 1; + + return stmmac_ec_dvr_probe(&pdev->dev, plat, &res); +} + +/** + * stmmac_pci_remove + * + * @pdev: platform device pointer + * Description: 
this function calls the main to free the net resources + * and releases the PCI resources. + */ +static void stmmac_pci_remove(struct pci_dev *pdev) +{ + int i; + + stmmac_ec_dvr_remove(&pdev->dev); + + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + pcim_iounmap_regions(pdev, BIT(i)); + break; + } +} + +static int __maybe_unused stmmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + return 0; +} + +static int __maybe_unused stmmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + +/* synthetic ID, no official vendor */ +#define PCI_VENDOR_ID_STMMAC 0x0700 + +#define PCI_DEVICE_ID_STMMAC_STMMAC 0x1108 +#define PCI_DEVICE_ID_SYNOPSYS_GMAC5_ID 0x7102 + +static const struct pci_device_id stmmac_id_table[] = { + { PCI_DEVICE_DATA(STMMAC, STMMAC, &stmmac_pci_info) }, + { PCI_DEVICE_DATA(STMICRO, MAC, &stmmac_pci_info) }, + { PCI_DEVICE_DATA(SYNOPSYS, GMAC5_ID, &snps_gmac5_pci_info) }, + {} +}; + +//MODULE_DEVICE_TABLE(pci, stmmac_id_table); + +static struct pci_driver stmmac_pci_driver = { + .name = STMMAC_RESOURCE_NAME, + .id_table = stmmac_id_table, + .probe = stmmac_pci_probe, + .remove = stmmac_pci_remove, + .driver = { + .pm = &stmmac_pm_ops, + }, +}; + +static int __init stmmac_pci_init(void) +{ + int ret; + ret = stmmac_init(); + if (ret) + return ret; + ret = pci_register_driver(&stmmac_pci_driver); + if (ret) { + stmmac_exit(); + } + return ret; +} + +static void __exit stmmac_pci_exit(void) +{ + pci_unregister_driver(&stmmac_pci_driver); + stmmac_exit(); +} + +module_init(stmmac_pci_init); +module_exit(stmmac_pci_exit); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver (EtherCAT-enabled)"); +MODULE_AUTHOR("Rayagond Kokatanur "); +MODULE_AUTHOR("Giuseppe Cavallaro "); +MODULE_LICENSE("GPL"); diff --git a/devices/stmmac/stmmac_pci-6.1-orig.c b/devices/stmmac/stmmac_pci-6.1-orig.c new file mode 100644 index 00000000..644bb54f --- /dev/null +++ b/devices/stmmac/stmmac_pci-6.1-orig.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + This contains the functions to handle the pci driver. 
+ + Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd + + + Author: Rayagond Kokatanur + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include + +#include "stmmac.h" + +struct stmmac_pci_info { + int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); +}; + +static void common_default_data(struct plat_stmmacenet_data *plat) +{ + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; + + plat->mdio_bus_data->needs_reset = true; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[0].use_prio = false; + plat->rx_queues_cfg[0].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[0].pkt_route = 0x0; +} + +static int stmmac_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + /* Set common default data first */ + common_default_data(plat); + + plat->bus_id = 1; + plat->phy_addr = 0; + plat->phy_interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + /* TODO: AXI */ + + return 0; +} + +static const struct stmmac_pci_info stmmac_pci_info = { + .setup = stmmac_default_data, +}; + +static int snps_gmac5_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + int i; + + plat->clk_csr = 5; + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 1; + plat->tso_en = 1; + plat->pmt = 1; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 4; + plat->rx_queues_to_use = 4; + + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; + for (i = 0; i < plat->tx_queues_to_use; i++) { + plat->tx_queues_cfg[i].use_prio = false; + plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->tx_queues_cfg[i].weight = 25; + if (i > 0) + plat->tx_queues_cfg[i].tbs_en = 1; + } + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + for (i = 0; i < plat->rx_queues_to_use; i++) { + plat->rx_queues_cfg[i].use_prio = false; + plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; + plat->rx_queues_cfg[i].pkt_route = 0x0; + plat->rx_queues_cfg[i].chan = i; + } + + plat->bus_id = 1; + plat->phy_addr = -1; + plat->phy_interface = PHY_INTERFACE_MODE_GMII; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->pblx8 = true; + + /* Axi Configuration */ + plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL); + if (!plat->axi) + return -ENOMEM; + + plat->axi->axi_wr_osr_lmt = 31; + plat->axi->axi_rd_osr_lmt = 31; + + plat->axi->axi_fb = false; + plat->axi->axi_blen[0] = 4; + plat->axi->axi_blen[1] = 8; + plat->axi->axi_blen[2] = 16; + plat->axi->axi_blen[3] = 32; + + return 0; +} + +static const struct stmmac_pci_info snps_gmac5_pci_info = { + .setup = snps_gmac5_default_data, +}; + +/** + * stmmac_pci_probe + * + * @pdev: pci device pointer + * @id: pointer to table of 
device id/id's. + * + * Description: This probing function gets called for all PCI devices which + * match the ID table and are not "owned" by other driver yet. This function + * gets passed a "struct pci_dev *" for each device whose entry in the ID table + * matches the device. The probe functions returns zero when the driver choose + * to take "ownership" of the device or an error code(-ve no) otherwise. + */ +static int stmmac_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data; + struct plat_stmmacenet_data *plat; + struct stmmac_resources res; + int i; + int ret; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; + + plat->safety_feat_cfg = devm_kzalloc(&pdev->dev, + sizeof(*plat->safety_feat_cfg), + GFP_KERNEL); + if (!plat->safety_feat_cfg) + return -ENOMEM; + + /* Enable pci device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", + __func__); + return ret; + } + + /* Get the base address of device */ + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); + if (ret) + return ret; + break; + } + + pci_set_master(pdev); + + ret = info->setup(pdev, plat); + if (ret) + return ret; + + memset(&res, 0, sizeof(res)); + res.addr = pcim_iomap_table(pdev)[i]; + res.wol_irq = pdev->irq; + res.irq = pdev->irq; + + plat->safety_feat_cfg->tsoee = 1; + plat->safety_feat_cfg->mrxpee = 1; + plat->safety_feat_cfg->mestee = 1; + plat->safety_feat_cfg->mrxee = 1; + plat->safety_feat_cfg->mtxee = 1; + plat->safety_feat_cfg->epsi = 1; + plat->safety_feat_cfg->edpp = 1; + plat->safety_feat_cfg->prtyen = 1; + plat->safety_feat_cfg->tmouten = 1; + + return stmmac_dvr_probe(&pdev->dev, plat, &res); +} + +/** + * stmmac_pci_remove + * + * @pdev: platform device pointer + * Description: this function calls the main to free the net resources + * and releases the PCI resources. 
+ */ +static void stmmac_pci_remove(struct pci_dev *pdev) +{ + int i; + + stmmac_dvr_remove(&pdev->dev); + + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + pcim_iounmap_regions(pdev, BIT(i)); + break; + } +} + +static int __maybe_unused stmmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + return 0; +} + +static int __maybe_unused stmmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + +/* synthetic ID, no official vendor */ +#define PCI_VENDOR_ID_STMMAC 0x0700 + +#define PCI_DEVICE_ID_STMMAC_STMMAC 0x1108 +#define PCI_DEVICE_ID_SYNOPSYS_GMAC5_ID 0x7102 + +static const struct pci_device_id stmmac_id_table[] = { + { PCI_DEVICE_DATA(STMMAC, STMMAC, &stmmac_pci_info) }, + { PCI_DEVICE_DATA(STMICRO, MAC, &stmmac_pci_info) }, + { PCI_DEVICE_DATA(SYNOPSYS, GMAC5_ID, &snps_gmac5_pci_info) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, stmmac_id_table); + +static struct pci_driver stmmac_pci_driver = { + .name = STMMAC_RESOURCE_NAME, + .id_table = stmmac_id_table, + .probe = stmmac_pci_probe, + .remove = stmmac_pci_remove, + .driver = { + .pm = &stmmac_pm_ops, + }, +}; + +module_pci_driver(stmmac_pci_driver); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); +MODULE_AUTHOR("Rayagond Kokatanur "); +MODULE_AUTHOR("Giuseppe Cavallaro "); +MODULE_LICENSE("GPL"); diff --git a/devices/stmmac/stmmac_pcs-6.1-ethercat.h b/devices/stmmac/stmmac_pcs-6.1-ethercat.h new file mode 100644 index 00000000..f02cf19d --- /dev/null +++ b/devices/stmmac/stmmac_pcs-6.1-ethercat.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * stmmac_pcs.h: Physical Coding Sublayer Header File + * + * Copyright (C) 2016 STMicroelectronics (R&D) Limited + * Author: Giuseppe Cavallaro + */ + +#ifndef __STMMAC_PCS_H__ +#define __STMMAC_PCS_H__ + +#include +#include +#include "common-6.1-ethercat.h" + +/* PCS registers (AN/TBI/SGMII/RGMII) offsets */ +#define GMAC_AN_CTRL(x) (x) /* AN control */ +#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */ +#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */ +#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partener ability */ +#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */ +#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */ + +/* AN Configuration defines */ +#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */ +#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */ +#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */ +#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */ +#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */ +#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */ + +/* AN Status defines */ +#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */ +#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */ +#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */ +#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */ + +/* ADV and LPA defines */ +#define GMAC_ANE_FD BIT(5) +#define GMAC_ANE_HD BIT(6) 
+#define GMAC_ANE_PSE GENMASK(8, 7) +#define GMAC_ANE_PSE_SHIFT 7 +#define GMAC_ANE_RFE GENMASK(13, 12) +#define GMAC_ANE_RFE_SHIFT 12 +#define GMAC_ANE_ACK BIT(14) + +/** + * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @intr_status: GMAC core interrupt status + * @x: pointer to log these events as stats + * Description: it is the ISR for PCS events: Auto-Negotiation Completed and + * Link status. + */ +static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, + unsigned int intr_status, + struct stmmac_extra_stats *x) +{ + u32 val = readl(ioaddr + GMAC_AN_STATUS(reg)); + + if (intr_status & PCS_ANE_IRQ) { + x->irq_pcs_ane_n++; + if (val & GMAC_AN_STATUS_ANC) + pr_info("stmmac_pcs: ANE process completed\n"); + } + + if (intr_status & PCS_LINK_IRQ) { + x->irq_pcs_link_n++; + if (val & GMAC_AN_STATUS_LS) + pr_info("stmmac_pcs: Link Up\n"); + else + pr_info("stmmac_pcs: Link Down\n"); + } +} + +/** + * dwmac_rane - To restart ANE + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @restart: to restart ANE + * Description: this is to just restart the Auto-Negotiation. + */ +static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + if (restart) + value |= GMAC_AN_CTRL_RAN; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); +} + +/** + * dwmac_ctrl_ane - To program the AN Control Register. + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @ane: to enable the auto-negotiation + * @srgmi_ral: to manage MAC-2-MAC SGMII connections. + * @loopback: to cause the PHY to loopback tx data into rx path. + * Description: this is the main function to configure the AN control register + * and init the ANE, select loopback (usually for debugging purpose) and + * configure SGMII RAL. + */ +static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, + bool srgmi_ral, bool loopback) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + /* Enable and restart the Auto-Negotiation */ + if (ane) + value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN; + + /* In case of MAC-2-MAC connection, block is configured to operate + * according to MAC conf register. + */ + if (srgmi_ral) + value |= GMAC_AN_CTRL_SGMRAL; + + if (loopback) + value |= GMAC_AN_CTRL_ELE; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); +} + +/** + * dwmac_get_adv_lp - Get ADV and LP cap + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @adv_lp: structure to store the adv,lp status + * Description: this is to expose the ANE advertisement and Link partner ability + * status to ethtool support. 
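+ *
+ * The local advertisement is read from GMAC_ANE_ADV and the link partner
+ * ability from GMAC_ANE_LPA; both are decoded into the duplex
+ * (GMAC_ANE_FD/GMAC_ANE_HD) and pause (GMAC_ANE_PSE) fields of
+ * struct rgmii_adv.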
+ */ +static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg, + struct rgmii_adv *adv_lp) +{ + u32 value = readl(ioaddr + GMAC_ANE_ADV(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->duplex |= DUPLEX_HALF; + + adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; + + value = readl(ioaddr + GMAC_ANE_LPA(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->lp_duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->lp_duplex = DUPLEX_HALF; + + adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +} +#endif /* __STMMAC_PCS_H__ */ diff --git a/devices/stmmac/stmmac_pcs-6.1-orig.h b/devices/stmmac/stmmac_pcs-6.1-orig.h new file mode 100644 index 00000000..aefc1214 --- /dev/null +++ b/devices/stmmac/stmmac_pcs-6.1-orig.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * stmmac_pcs.h: Physical Coding Sublayer Header File + * + * Copyright (C) 2016 STMicroelectronics (R&D) Limited + * Author: Giuseppe Cavallaro + */ + +#ifndef __STMMAC_PCS_H__ +#define __STMMAC_PCS_H__ + +#include +#include +#include "common.h" + +/* PCS registers (AN/TBI/SGMII/RGMII) offsets */ +#define GMAC_AN_CTRL(x) (x) /* AN control */ +#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */ +#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */ +#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partener ability */ +#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */ +#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */ + +/* AN Configuration defines */ +#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */ +#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */ +#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */ +#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */ +#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */ +#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */ + +/* AN Status defines */ +#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */ +#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */ +#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */ +#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */ + +/* ADV and LPA defines */ +#define GMAC_ANE_FD BIT(5) +#define GMAC_ANE_HD BIT(6) +#define GMAC_ANE_PSE GENMASK(8, 7) +#define GMAC_ANE_PSE_SHIFT 7 +#define GMAC_ANE_RFE GENMASK(13, 12) +#define GMAC_ANE_RFE_SHIFT 12 +#define GMAC_ANE_ACK BIT(14) + +/** + * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @intr_status: GMAC core interrupt status + * @x: pointer to log these events as stats + * Description: it is the ISR for PCS events: Auto-Negotiation Completed and + * Link status. + */ +static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, + unsigned int intr_status, + struct stmmac_extra_stats *x) +{ + u32 val = readl(ioaddr + GMAC_AN_STATUS(reg)); + + if (intr_status & PCS_ANE_IRQ) { + x->irq_pcs_ane_n++; + if (val & GMAC_AN_STATUS_ANC) + pr_info("stmmac_pcs: ANE process completed\n"); + } + + if (intr_status & PCS_LINK_IRQ) { + x->irq_pcs_link_n++; + if (val & GMAC_AN_STATUS_LS) + pr_info("stmmac_pcs: Link Up\n"); + else + pr_info("stmmac_pcs: Link Down\n"); + } +} + +/** + * dwmac_rane - To restart ANE + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @restart: to restart ANE + * Description: this is to just restart the Auto-Negotiation. 
+ */ +static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + if (restart) + value |= GMAC_AN_CTRL_RAN; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); +} + +/** + * dwmac_ctrl_ane - To program the AN Control Register. + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @ane: to enable the auto-negotiation + * @srgmi_ral: to manage MAC-2-MAC SGMII connections. + * @loopback: to cause the PHY to loopback tx data into rx path. + * Description: this is the main function to configure the AN control register + * and init the ANE, select loopback (usually for debugging purpose) and + * configure SGMII RAL. + */ +static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, + bool srgmi_ral, bool loopback) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + /* Enable and restart the Auto-Negotiation */ + if (ane) + value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN; + + /* In case of MAC-2-MAC connection, block is configured to operate + * according to MAC conf register. + */ + if (srgmi_ral) + value |= GMAC_AN_CTRL_SGMRAL; + + if (loopback) + value |= GMAC_AN_CTRL_ELE; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); +} + +/** + * dwmac_get_adv_lp - Get ADV and LP cap + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @adv_lp: structure to store the adv,lp status + * Description: this is to expose the ANE advertisement and Link partner ability + * status to ethtool support. + */ +static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg, + struct rgmii_adv *adv_lp) +{ + u32 value = readl(ioaddr + GMAC_ANE_ADV(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->duplex |= DUPLEX_HALF; + + adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; + + value = readl(ioaddr + GMAC_ANE_LPA(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->lp_duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->lp_duplex = DUPLEX_HALF; + + adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +} +#endif /* __STMMAC_PCS_H__ */ diff --git a/devices/stmmac/stmmac_ptp-6.1-ethercat.c b/devices/stmmac/stmmac_ptp-6.1-ethercat.c new file mode 100644 index 00000000..b54fb4cf --- /dev/null +++ b/devices/stmmac/stmmac_ptp-6.1-ethercat.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + PTP 1588 clock using the STMMAC. + + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + + Author: Rayagond Kokatanur +*******************************************************************************/ +#include "stmmac-6.1-ethercat.h" +#include "stmmac_ptp-6.1-ethercat.h" +#include "dwmac4-6.1-ethercat.h" + +/** + * stmmac_adjust_freq + * + * @ptp: pointer to ptp_clock_info structure + * @ppb: desired period change in parts ber billion + * + * Description: this function will adjust the frequency of hardware clock. + */ +static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 diff, addend; + int neg_adj = 0; + u64 adj; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + addend = priv->default_addend; + adj = addend; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + addend = neg_adj ? 
(addend - diff) : (addend + diff); + + write_lock_irqsave(&priv->ptp_lock, flags); + stmmac_config_addend(priv, priv->ptpaddr, addend); + write_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +/** + * stmmac_adjust_time + * + * @ptp: pointer to ptp_clock_info structure + * @delta: desired change in nanoseconds + * + * Description: this function will shift/adjust the hardware clock time. + */ +static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 sec, nsec; + u32 quotient, reminder; + int neg_adj = 0; + bool xmac, est_rst = false; + int ret; + + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + quotient = div_u64_rem(delta, 1000000000ULL, &reminder); + sec = quotient; + nsec = reminder; + + /* If EST is enabled, disabled it before adjust ptp time. */ + if (priv->plat->est && priv->plat->est->enable) { + est_rst = true; + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } + + write_lock_irqsave(&priv->ptp_lock, flags); + stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); + write_unlock_irqrestore(&priv->ptp_lock, flags); + + /* Caculate new basetime and re-configured EST after PTP time adjust. */ + if (est_rst) { + struct timespec64 current_time, time; + ktime_t current_time_ns, basetime; + u64 cycle_time; + + mutex_lock(&priv->plat->est->lock); + priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); + current_time_ns = timespec64_to_ktime(current_time); + time.tv_nsec = priv->plat->est->btr_reserve[0]; + time.tv_sec = priv->plat->est->btr_reserve[1]; + basetime = timespec64_to_ktime(time); + cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC + + priv->plat->est->ctr[0]; + time = stmmac_calc_tas_basetime(basetime, + current_time_ns, + cycle_time); + + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + priv->plat->est->enable = true; + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + if (ret) + netdev_err(priv->dev, "failed to configure EST\n"); + } + + return 0; +} + +/** + * stmmac_get_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: pointer to hold time/result + * + * Description: this function will read the current time from the + * hardware clock and store it in @ts. + */ +static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u64 ns = 0; + + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_systime(priv, priv->ptpaddr, &ns); + read_unlock_irqrestore(&priv->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * stmmac_set_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: time value to set + * + * Description: this function will set the current time on the + * hardware clock. 
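stmmac_adjust_freq() above relies on the MAC's fine-correction scheme: every cycle of the PTP reference clock a 32-bit accumulator advances by the programmed addend, and each accumulator overflow adds one sub-second increment to the system time, so scaling the addend by ppb/10^9 shifts the synthesized clock rate by exactly that fraction. The same arithmetic as a stand-alone sketch, with a hypothetical helper name and no kernel dependencies:

#include <stdint.h>

/* Mirror of the computation in stmmac_adjust_freq(): scale the addend
 * programmed at init time by ppb parts per billion. */
static uint32_t scale_addend(uint32_t default_addend, int32_t ppb)
{
        int64_t  mag  = ppb < 0 ? -(int64_t)ppb : ppb;
        uint64_t adj  = (uint64_t)default_addend * (uint64_t)mag;
        uint32_t diff = (uint32_t)(adj / 1000000000ULL);

        return ppb < 0 ? default_addend - diff : default_addend + diff;
}

With default_addend = 0x80000000 and ppb = 100000 (100 ppm), diff comes out as 214748, i.e. the accumulator rate, and with it the clock, shifts by 0.01 %.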
+ */ +static int stmmac_set_time(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + + write_lock_irqsave(&priv->ptp_lock, flags); + stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec); + write_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +static int stmmac_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + struct stmmac_pps_cfg *cfg; + int ret = -EOPNOTSUPP; + unsigned long flags; + u32 acr_value; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + cfg = &priv->pps[rq->perout.index]; + + cfg->start.tv_sec = rq->perout.start.sec; + cfg->start.tv_nsec = rq->perout.start.nsec; + cfg->period.tv_sec = rq->perout.period.sec; + cfg->period.tv_nsec = rq->perout.period.nsec; + + write_lock_irqsave(&priv->ptp_lock, flags); + ret = stmmac_flex_pps_config(priv, priv->ioaddr, + rq->perout.index, cfg, on, + priv->sub_second_inc, + priv->systime_flags); + write_unlock_irqrestore(&priv->ptp_lock, flags); + break; + case PTP_CLK_REQ_EXTTS: + priv->plat->ext_snapshot_en = on; + mutex_lock(&priv->aux_ts_lock); + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + if (on) { + /* Enable External snapshot trigger */ + acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSFC; + netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + } else { + netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + } + writel(acr_value, ptpaddr + PTP_ACR); + mutex_unlock(&priv->aux_ts_lock); + /* wait for auxts fifo clear to finish */ + ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value, + !(acr_value & PTP_ACR_ATSFC), + 10, 10000); + break; + + default: + break; + } + + return ret; +} + +/** + * stmmac_get_syncdevicetime + * @device: current device time + * @system: system counter value read synchronously with device time + * @ctx: context provided by timekeeping code + * Description: Read device and system clock simultaneously and return the + * corrected clock values in ns. 
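The PTP_CLK_REQ_PEROUT branch of stmmac_enable() above is what ultimately services periodic-output requests arriving through the PTP character device. One possible user-space request, sketched under the assumption that the clock is exposed as /dev/ptp0; the driver rejects any perout flags, so the flags field is left at zero, and the start time is left at zero only for brevity (a real caller would usually pick a point shortly in the future):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_perout_request req;
        int fd = open("/dev/ptp0", O_RDWR);     /* assumed device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&req, 0, sizeof(req));
        req.index = 0;          /* first PPS output, see priv->pps[] */
        req.period.sec = 1;     /* 1 Hz square wave */
        req.period.nsec = 0;
        /* req.flags stays 0: stmmac_enable() rejects unsupported flags */

        if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
                perror("PTP_PEROUT_REQUEST");

        close(fd);
        return 0;
}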
+ **/ +static int stmmac_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + + if (priv->plat->crosststamp) + return priv->plat->crosststamp(device, system, ctx); + else + return -EOPNOTSUPP; +} + +static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + + return get_device_system_crosststamp(stmmac_get_syncdevicetime, + priv, NULL, xtstamp); +} + +/* structure describing a PTP hardware clock */ +static struct ptp_clock_info stmmac_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "stmmac ptp", + .max_adj = 62500000, + .n_alarm = 0, + .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */ + .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ + .n_pins = 0, + .pps = 0, + .adjfreq = stmmac_adjust_freq, + .adjtime = stmmac_adjust_time, + .gettime64 = stmmac_get_time, + .settime64 = stmmac_set_time, + .enable = stmmac_enable, + .getcrosststamp = stmmac_getcrosststamp, +}; + +/** + * stmmac_ptp_register + * @priv: driver private structure + * Description: this function will register the ptp clock driver + * to kernel. It also does some house keeping work. + */ +void stmmac_ptp_register(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { + if (i >= STMMAC_PPS_MAX) + break; + priv->pps[i].available = true; + } + + if (priv->plat->ptp_max_adj) + stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; + + /* Calculate the clock domain crossing (CDC) error if necessary */ + priv->plat->cdc_error_adj = 0; + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) + priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; + + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; + + rwlock_init(&priv->ptp_lock); + mutex_init(&priv->aux_ts_lock); + priv->ptp_clock_ops = stmmac_ptp_clock_ops; + + priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, + priv->device); + if (IS_ERR(priv->ptp_clock)) { + netdev_err(priv->dev, "ptp_clock_register failed\n"); + priv->ptp_clock = NULL; + } else if (priv->ptp_clock) + netdev_info(priv->dev, "registered PTP clock\n"); +} + +/** + * stmmac_ptp_unregister + * @priv: driver private structure + * Description: this function will remove/unregister the ptp clock driver + * from the kernel. 
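stmmac_ptp_register() above fills in n_per_out and n_ext_ts from the DMA capability register (and max_adj from the platform override, if any) before handing the clock to the PTP core, so those limits can be read back from user space. A minimal sketch, assuming the clock shows up as /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_clock_caps caps;
        int fd = open("/dev/ptp0", O_RDONLY);   /* assumed device node */

        if (fd < 0 || ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
                perror("ptp caps");
                return 1;
        }

        /* max_adj mirrors stmmac_ptp_clock_ops.max_adj (or the platform
         * override); n_per_out/n_ext_ts come from dma_cap. */
        printf("max_adj=%d ppb, n_per_out=%d, n_ext_ts=%d, pps=%d\n",
               caps.max_adj, caps.n_per_out, caps.n_ext_ts, caps.pps);

        close(fd);
        return 0;
}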
+ */ +void stmmac_ptp_unregister(struct stmmac_priv *priv) +{ + if (priv->ptp_clock) { + ptp_clock_unregister(priv->ptp_clock); + priv->ptp_clock = NULL; + pr_debug("Removed PTP HW clock successfully on %s\n", + priv->dev->name); + } + + mutex_destroy(&priv->aux_ts_lock); +} diff --git a/devices/stmmac/stmmac_ptp-6.1-ethercat.h b/devices/stmmac/stmmac_ptp-6.1-ethercat.h new file mode 100644 index 00000000..bf619295 --- /dev/null +++ b/devices/stmmac/stmmac_ptp-6.1-ethercat.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/****************************************************************************** + PTP Header file + + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + + Author: Rayagond Kokatanur +******************************************************************************/ + +#ifndef __STMMAC_PTP_H__ +#define __STMMAC_PTP_H__ + +#define PTP_XGMAC_OFFSET 0xd00 +#define PTP_GMAC4_OFFSET 0xb00 +#define PTP_GMAC3_X_OFFSET 0x700 + +/* IEEE 1588 PTP register offsets */ +#define PTP_TCR 0x00 /* Timestamp Control Reg */ +#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x08 /* System Time – Seconds Regr */ +#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ +#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ +#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ +#define PTP_TAR 0x18 /* Timestamp Addend Reg */ +#define PTP_ACR 0x40 /* Auxiliary Control Reg */ +#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */ +#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */ + +#define PTP_STNSUR_ADDSUB_SHIFT 31 +#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ +#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ + +/* PTP Timestamp control register defines */ +#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */ +#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ +#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ +#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ +#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ +#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ +#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ +#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ +/* Enable PTP packet Processing for Version 2 Format */ +#define PTP_TCR_TSVER2ENA BIT(10) +/* Enable Processing of PTP over Ethernet Frames */ +#define PTP_TCR_TSIPENA BIT(11) +/* Enable Processing of PTP Frames Sent over IPv6-UDP */ +#define PTP_TCR_TSIPV6ENA BIT(12) +/* Enable Processing of PTP Frames Sent over IPv4-UDP */ +#define PTP_TCR_TSIPV4ENA BIT(13) +/* Enable Timestamp Snapshot for Event Messages */ +#define PTP_TCR_TSEVNTENA BIT(14) +/* Enable Snapshot for Messages Relevant to Master */ +#define PTP_TCR_TSMSTRENA BIT(15) +/* Select PTP packets for Taking Snapshots + * On gmac4 specifically: + * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled. 
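The two rollover constants above determine how the sub-second register is read: in digital rollover mode it counts nanoseconds directly and wraps at 10^9, while in binary mode it wraps at 2^31, making one LSB roughly 10^9/2^31 = 0.466 ns. A small conversion sketch (hypothetical helper, not part of the driver):

#include <stdint.h>

/* Convert a (seconds, sub-seconds) snapshot to nanoseconds. Digital
 * rollover: the sub-second counter is already in ns. Binary rollover:
 * it counts 2^31 steps per second. */
static uint64_t stmmac_snapshot_to_ns(uint32_t sec, uint32_t subsec,
                                      int digital_rollover)
{
        uint64_t frac_ns;

        if (digital_rollover)
                frac_ns = subsec;                               /* 1 ns units */
        else
                frac_ns = ((uint64_t)subsec * 1000000000ULL) >> 31;

        return (uint64_t)sec * 1000000000ULL + frac_ns;
}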
+ * or + * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp, + * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled + */ +#define PTP_TCR_SNAPTYPSEL_1 BIT(16) +/* Enable MAC address for PTP Frame Filtering */ +#define PTP_TCR_TSENMACADDR BIT(18) + +/* SSIR defines */ +#define PTP_SSIR_SSINC_MAX 0xff +#define GMAC4_PTP_SSIR_SSINC_SHIFT 16 + +/* Auxiliary Control defines */ +#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */ +#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */ +#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ +#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ +#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ +#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ +#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ +#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ +#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ +#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */ +#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */ +#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */ + +enum aux_snapshot { + AUX_SNAPSHOT0 = 0x10, + AUX_SNAPSHOT1 = 0x20, + AUX_SNAPSHOT2 = 0x40, + AUX_SNAPSHOT3 = 0x80, +}; + +#endif /* __STMMAC_PTP_H__ */ diff --git a/devices/stmmac/stmmac_ptp-6.1-orig.c b/devices/stmmac/stmmac_ptp-6.1-orig.c new file mode 100644 index 00000000..9c91a3dc --- /dev/null +++ b/devices/stmmac/stmmac_ptp-6.1-orig.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0-only +/******************************************************************************* + PTP 1588 clock using the STMMAC. + + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + + Author: Rayagond Kokatanur +*******************************************************************************/ +#include "stmmac.h" +#include "stmmac_ptp.h" +#include "dwmac4.h" + +/** + * stmmac_adjust_freq + * + * @ptp: pointer to ptp_clock_info structure + * @ppb: desired period change in parts ber billion + * + * Description: this function will adjust the frequency of hardware clock. + */ +static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 diff, addend; + int neg_adj = 0; + u64 adj; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + addend = priv->default_addend; + adj = addend; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + addend = neg_adj ? (addend - diff) : (addend + diff); + + write_lock_irqsave(&priv->ptp_lock, flags); + stmmac_config_addend(priv, priv->ptpaddr, addend); + write_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +/** + * stmmac_adjust_time + * + * @ptp: pointer to ptp_clock_info structure + * @delta: desired change in nanoseconds + * + * Description: this function will shift/adjust the hardware clock time. + */ +static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 sec, nsec; + u32 quotient, reminder; + int neg_adj = 0; + bool xmac, est_rst = false; + int ret; + + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + quotient = div_u64_rem(delta, 1000000000ULL, &reminder); + sec = quotient; + nsec = reminder; + + /* If EST is enabled, disabled it before adjust ptp time. 
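Frequency adjustments like the one handled by stmmac_adjust_freq() above normally arrive via clock_adjtime() on the dynamic POSIX clock wrapping the PTP character device; the freq field there is ppm with 16 fractional bits, which the kernel converts to the ppb value the driver sees. A hedged user-space sketch using the usual CLOCKFD encoding (the /dev/ptp0 path and the +10 ppm value are only examples):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/timex.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
        struct timex tx = { 0 };
        int fd = open("/dev/ptp0", O_RDWR);     /* assumed device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        tx.modes = ADJ_FREQUENCY;
        tx.freq = 10 * 65536;   /* +10 ppm, i.e. +10000 ppb at the driver */

        if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
                perror("clock_adjtime");

        close(fd);
        return 0;
}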
*/ + if (priv->plat->est && priv->plat->est->enable) { + est_rst = true; + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } + + write_lock_irqsave(&priv->ptp_lock, flags); + stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); + write_unlock_irqrestore(&priv->ptp_lock, flags); + + /* Caculate new basetime and re-configured EST after PTP time adjust. */ + if (est_rst) { + struct timespec64 current_time, time; + ktime_t current_time_ns, basetime; + u64 cycle_time; + + mutex_lock(&priv->plat->est->lock); + priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); + current_time_ns = timespec64_to_ktime(current_time); + time.tv_nsec = priv->plat->est->btr_reserve[0]; + time.tv_sec = priv->plat->est->btr_reserve[1]; + basetime = timespec64_to_ktime(time); + cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC + + priv->plat->est->ctr[0]; + time = stmmac_calc_tas_basetime(basetime, + current_time_ns, + cycle_time); + + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + priv->plat->est->enable = true; + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + if (ret) + netdev_err(priv->dev, "failed to configure EST\n"); + } + + return 0; +} + +/** + * stmmac_get_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: pointer to hold time/result + * + * Description: this function will read the current time from the + * hardware clock and store it in @ts. + */ +static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u64 ns = 0; + + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_systime(priv, priv->ptpaddr, &ns); + read_unlock_irqrestore(&priv->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * stmmac_set_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: time value to set + * + * Description: this function will set the current time on the + * hardware clock. 
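When the clock is stepped, the EST base time programmed earlier may end up in the past, so the code above rolls it forward by a whole number of cycle times before re-enabling the schedule. The same arithmetic as a stand-alone sketch (plain 64-bit nanosecond values instead of ktime_t; the helper name is hypothetical):

#include <stdint.h>

/* Roll old_base forward to the first cycle boundary strictly after
 * `now`, exactly as stmmac_calc_tas_basetime() does with ktime_t. */
static int64_t tas_next_basetime(int64_t old_base, int64_t now,
                                 int64_t cycle_ns)
{
        int64_t n;

        if (old_base > now)
                return old_base;        /* still in the future */

        n = (now - old_base) / cycle_ns;
        return old_base + (n + 1) * cycle_ns;
}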
+ */ +static int stmmac_set_time(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + + write_lock_irqsave(&priv->ptp_lock, flags); + stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec); + write_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +static int stmmac_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + struct stmmac_pps_cfg *cfg; + int ret = -EOPNOTSUPP; + unsigned long flags; + u32 acr_value; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + cfg = &priv->pps[rq->perout.index]; + + cfg->start.tv_sec = rq->perout.start.sec; + cfg->start.tv_nsec = rq->perout.start.nsec; + cfg->period.tv_sec = rq->perout.period.sec; + cfg->period.tv_nsec = rq->perout.period.nsec; + + write_lock_irqsave(&priv->ptp_lock, flags); + ret = stmmac_flex_pps_config(priv, priv->ioaddr, + rq->perout.index, cfg, on, + priv->sub_second_inc, + priv->systime_flags); + write_unlock_irqrestore(&priv->ptp_lock, flags); + break; + case PTP_CLK_REQ_EXTTS: + priv->plat->ext_snapshot_en = on; + mutex_lock(&priv->aux_ts_lock); + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + if (on) { + /* Enable External snapshot trigger */ + acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSFC; + netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + } else { + netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + } + writel(acr_value, ptpaddr + PTP_ACR); + mutex_unlock(&priv->aux_ts_lock); + /* wait for auxts fifo clear to finish */ + ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value, + !(acr_value & PTP_ACR_ATSFC), + 10, 10000); + break; + + default: + break; + } + + return ret; +} + +/** + * stmmac_get_syncdevicetime + * @device: current device time + * @system: system counter value read synchronously with device time + * @ctx: context provided by timekeeping code + * Description: Read device and system clock simultaneously and return the + * corrected clock values in ns. 
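The PTP_CLK_REQ_EXTTS branch above arms one of the auxiliary snapshot units; the captured timestamps are then delivered through the generic PHC external-timestamp interface. A hedged user-space sketch (device path, channel number and edge flags are assumptions and may not match a given board):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_extts_request req;
        struct ptp_extts_event ev;
        int fd = open("/dev/ptp0", O_RDWR);     /* assumed device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&req, 0, sizeof(req));
        req.index = 0;                          /* auxiliary snapshot 0 */
        req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

        if (ioctl(fd, PTP_EXTTS_REQUEST, &req)) {
                perror("PTP_EXTTS_REQUEST");
                return 1;
        }

        /* Each captured edge is read back as one ptp_extts_event. */
        if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                printf("extts[%u] at %lld.%09u\n", ev.index,
                       (long long)ev.t.sec, ev.t.nsec);

        close(fd);
        return 0;
}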
+ **/ +static int stmmac_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + + if (priv->plat->crosststamp) + return priv->plat->crosststamp(device, system, ctx); + else + return -EOPNOTSUPP; +} + +static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + + return get_device_system_crosststamp(stmmac_get_syncdevicetime, + priv, NULL, xtstamp); +} + +/* structure describing a PTP hardware clock */ +static struct ptp_clock_info stmmac_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "stmmac ptp", + .max_adj = 62500000, + .n_alarm = 0, + .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */ + .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ + .n_pins = 0, + .pps = 0, + .adjfreq = stmmac_adjust_freq, + .adjtime = stmmac_adjust_time, + .gettime64 = stmmac_get_time, + .settime64 = stmmac_set_time, + .enable = stmmac_enable, + .getcrosststamp = stmmac_getcrosststamp, +}; + +/** + * stmmac_ptp_register + * @priv: driver private structure + * Description: this function will register the ptp clock driver + * to kernel. It also does some house keeping work. + */ +void stmmac_ptp_register(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { + if (i >= STMMAC_PPS_MAX) + break; + priv->pps[i].available = true; + } + + if (priv->plat->ptp_max_adj) + stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; + + /* Calculate the clock domain crossing (CDC) error if necessary */ + priv->plat->cdc_error_adj = 0; + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) + priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; + + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; + + rwlock_init(&priv->ptp_lock); + mutex_init(&priv->aux_ts_lock); + priv->ptp_clock_ops = stmmac_ptp_clock_ops; + + priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, + priv->device); + if (IS_ERR(priv->ptp_clock)) { + netdev_err(priv->dev, "ptp_clock_register failed\n"); + priv->ptp_clock = NULL; + } else if (priv->ptp_clock) + netdev_info(priv->dev, "registered PTP clock\n"); +} + +/** + * stmmac_ptp_unregister + * @priv: driver private structure + * Description: this function will remove/unregister the ptp clock driver + * from the kernel. 
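stmmac_ptp_register() above also precomputes the clock-domain-crossing compensation as two periods of the PTP reference clock, which the driver later uses to correct hardware timestamps. For example, clk_ptp_rate = 200 MHz gives 2 * 10^9 / 200 000 000 = 10 ns. As a one-liner (the 200 MHz figure is only an example):

#include <stdint.h>

/* Two cycles of the PTP reference clock, in ns, as computed in
 * stmmac_ptp_register(); e.g. 200 MHz -> 10 ns. */
static inline uint32_t cdc_error_ns(uint32_t clk_ptp_rate)
{
        return clk_ptp_rate ? (2U * 1000000000U) / clk_ptp_rate : 0;
}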
+ */ +void stmmac_ptp_unregister(struct stmmac_priv *priv) +{ + if (priv->ptp_clock) { + ptp_clock_unregister(priv->ptp_clock); + priv->ptp_clock = NULL; + pr_debug("Removed PTP HW clock successfully on %s\n", + priv->dev->name); + } + + mutex_destroy(&priv->aux_ts_lock); +} diff --git a/devices/stmmac/stmmac_ptp-6.1-orig.h b/devices/stmmac/stmmac_ptp-6.1-orig.h new file mode 100644 index 00000000..bf619295 --- /dev/null +++ b/devices/stmmac/stmmac_ptp-6.1-orig.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/****************************************************************************** + PTP Header file + + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + + Author: Rayagond Kokatanur +******************************************************************************/ + +#ifndef __STMMAC_PTP_H__ +#define __STMMAC_PTP_H__ + +#define PTP_XGMAC_OFFSET 0xd00 +#define PTP_GMAC4_OFFSET 0xb00 +#define PTP_GMAC3_X_OFFSET 0x700 + +/* IEEE 1588 PTP register offsets */ +#define PTP_TCR 0x00 /* Timestamp Control Reg */ +#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x08 /* System Time – Seconds Regr */ +#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ +#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ +#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ +#define PTP_TAR 0x18 /* Timestamp Addend Reg */ +#define PTP_ACR 0x40 /* Auxiliary Control Reg */ +#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */ +#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */ + +#define PTP_STNSUR_ADDSUB_SHIFT 31 +#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ +#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ + +/* PTP Timestamp control register defines */ +#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */ +#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ +#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ +#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ +#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ +#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ +#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ +#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ +/* Enable PTP packet Processing for Version 2 Format */ +#define PTP_TCR_TSVER2ENA BIT(10) +/* Enable Processing of PTP over Ethernet Frames */ +#define PTP_TCR_TSIPENA BIT(11) +/* Enable Processing of PTP Frames Sent over IPv6-UDP */ +#define PTP_TCR_TSIPV6ENA BIT(12) +/* Enable Processing of PTP Frames Sent over IPv4-UDP */ +#define PTP_TCR_TSIPV4ENA BIT(13) +/* Enable Timestamp Snapshot for Event Messages */ +#define PTP_TCR_TSEVNTENA BIT(14) +/* Enable Snapshot for Messages Relevant to Master */ +#define PTP_TCR_TSMSTRENA BIT(15) +/* Select PTP packets for Taking Snapshots + * On gmac4 specifically: + * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled. 
+ * or + * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp, + * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled + */ +#define PTP_TCR_SNAPTYPSEL_1 BIT(16) +/* Enable MAC address for PTP Frame Filtering */ +#define PTP_TCR_TSENMACADDR BIT(18) + +/* SSIR defines */ +#define PTP_SSIR_SSINC_MAX 0xff +#define GMAC4_PTP_SSIR_SSINC_SHIFT 16 + +/* Auxiliary Control defines */ +#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */ +#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */ +#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ +#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ +#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ +#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ +#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ +#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ +#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ +#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */ +#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */ +#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */ + +enum aux_snapshot { + AUX_SNAPSHOT0 = 0x10, + AUX_SNAPSHOT1 = 0x20, + AUX_SNAPSHOT2 = 0x40, + AUX_SNAPSHOT3 = 0x80, +}; + +#endif /* __STMMAC_PTP_H__ */ diff --git a/devices/stmmac/stmmac_tc-6.1-ethercat.c b/devices/stmmac/stmmac_tc-6.1-ethercat.c new file mode 100644 index 00000000..34876778 --- /dev/null +++ b/devices/stmmac/stmmac_tc-6.1-ethercat.c @@ -0,0 +1,1115 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. + * stmmac TC Handling (HW only) + */ + +#include +#include +#include "common-6.1-ethercat.h" +#include "dwmac4-6.1-ethercat.h" +#include "dwmac5-6.1-ethercat.h" +#include "stmmac-6.1-ethercat.h" + +static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry) +{ + memset(entry, 0, sizeof(*entry)); + entry->in_use = true; + entry->is_last = true; + entry->is_frag = false; + entry->prio = ~0x0; + entry->handle = 0; + entry->val.match_data = 0x0; + entry->val.match_en = 0x0; + entry->val.af = 1; + entry->val.dma_ch_no = 0x0; +} + +static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls, + bool free) +{ + struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL; + u32 loc = cls->knode.handle; + int i; + + for (i = 0; i < priv->tc_entries_max; i++) { + entry = &priv->tc_entries[i]; + if (!entry->in_use && !first && free) + first = entry; + if ((entry->handle == loc) && !free && !entry->is_frag) + dup = entry; + } + + if (dup) + return dup; + if (first) { + first->handle = loc; + first->in_use = true; + + /* Reset HW values */ + memset(&first->val, 0, sizeof(first->val)); + } + + return first; +} + +static int tc_fill_actions(struct stmmac_tc_entry *entry, + struct stmmac_tc_entry *frag, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *action_entry = entry; + const struct tc_action *act; + struct tcf_exts *exts; + int i; + + exts = cls->knode.exts; + if (!tcf_exts_has_actions(exts)) + return -EINVAL; + if (frag) + action_entry = frag; + + tcf_exts_for_each_action(i, act, exts) { + /* Accept */ + if (is_tcf_gact_ok(act)) { + action_entry->val.af = 1; + break; + } + /* Drop */ + if (is_tcf_gact_shot(act)) { + action_entry->val.rf = 1; + break; + } + + /* Unsupported */ + return -EINVAL; + } + + return 0; +} + +static int tc_fill_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry 
*entry, *frag = NULL; + struct tc_u32_sel *sel = cls->knode.sel; + u32 off, data, mask, real_off, rem; + u32 prio = cls->common.prio << 16; + int ret; + + /* Only 1 match per entry */ + if (sel->nkeys <= 0 || sel->nkeys > 1) + return -EINVAL; + + off = sel->keys[0].off << sel->offshift; + data = sel->keys[0].val; + mask = sel->keys[0].mask; + + switch (ntohs(cls->common.protocol)) { + case ETH_P_ALL: + break; + case ETH_P_IP: + off += ETH_HLEN; + break; + default: + return -EINVAL; + } + + if (off > priv->tc_off_max) + return -EINVAL; + + real_off = off / 4; + rem = off % 4; + + entry = tc_find_entry(priv, cls, true); + if (!entry) + return -EINVAL; + + if (rem) { + frag = tc_find_entry(priv, cls, true); + if (!frag) { + ret = -EINVAL; + goto err_unuse; + } + + entry->frag_ptr = frag; + entry->val.match_en = (mask << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.match_data = (data << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.frame_offset = real_off; + entry->prio = prio; + + frag->val.match_en = (mask >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.match_data = (data >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.frame_offset = real_off + 1; + frag->prio = prio; + frag->is_frag = true; + } else { + entry->frag_ptr = NULL; + entry->val.match_en = mask; + entry->val.match_data = data; + entry->val.frame_offset = real_off; + entry->prio = prio; + } + + ret = tc_fill_actions(entry, frag, cls); + if (ret) + goto err_unuse; + + return 0; + +err_unuse: + if (frag) + frag->in_use = false; + entry->in_use = false; + return ret; +} + +static void tc_unfill_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *entry; + + entry = tc_find_entry(priv, cls, false); + if (!entry) + return; + + entry->in_use = false; + if (entry->frag_ptr) { + entry = entry->frag_ptr; + entry->is_frag = false; + entry->in_use = false; + } +} + +static int tc_config_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + int ret; + + ret = tc_fill_entry(priv, cls); + if (ret) + return ret; + + ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); + if (ret) + goto err_unfill; + + return 0; + +err_unfill: + tc_unfill_entry(priv, cls); + return ret; +} + +static int tc_delete_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + /* Set entry and fragments as not used */ + tc_unfill_entry(priv, cls); + + return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); +} + +static int tc_setup_cls_u32(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + switch (cls->command) { + case TC_CLSU32_REPLACE_KNODE: + tc_unfill_entry(priv, cls); + fallthrough; + case TC_CLSU32_NEW_KNODE: + return tc_config_knode(priv, cls); + case TC_CLSU32_DELETE_KNODE: + return tc_delete_knode(priv, cls); + default: + return -EOPNOTSUPP; + } +} + +static int tc_rfs_init(struct stmmac_priv *priv) +{ + int i; + + priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8; + priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1; + priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1; + + for (i = 0; i < STMMAC_RFS_T_MAX; i++) + priv->rfs_entries_total += priv->rfs_entries_max[i]; + + priv->rfs_entries = devm_kcalloc(priv->device, + priv->rfs_entries_total, + sizeof(*priv->rfs_entries), + GFP_KERNEL); + if (!priv->rfs_entries) + return -ENOMEM; + + dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n", + priv->rfs_entries_total); + + return 0; +} + +static int tc_init(struct stmmac_priv *priv) +{ + struct 
dma_features *dma_cap = &priv->dma_cap; + unsigned int count; + int ret, i; + + if (dma_cap->l3l4fnum) { + priv->flow_entries_max = dma_cap->l3l4fnum; + priv->flow_entries = devm_kcalloc(priv->device, + dma_cap->l3l4fnum, + sizeof(*priv->flow_entries), + GFP_KERNEL); + if (!priv->flow_entries) + return -ENOMEM; + + for (i = 0; i < priv->flow_entries_max; i++) + priv->flow_entries[i].idx = i; + + dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n", + priv->flow_entries_max); + } + + ret = tc_rfs_init(priv); + if (ret) + return -ENOMEM; + + if (!priv->plat->fpe_cfg) { + priv->plat->fpe_cfg = devm_kzalloc(priv->device, + sizeof(*priv->plat->fpe_cfg), + GFP_KERNEL); + if (!priv->plat->fpe_cfg) + return -ENOMEM; + } else { + memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg)); + } + + /* Fail silently as we can still use remaining features, e.g. CBS */ + if (!dma_cap->frpsel) + return 0; + + switch (dma_cap->frpbs) { + case 0x0: + priv->tc_off_max = 64; + break; + case 0x1: + priv->tc_off_max = 128; + break; + case 0x2: + priv->tc_off_max = 256; + break; + default: + return -EINVAL; + } + + switch (dma_cap->frpes) { + case 0x0: + count = 64; + break; + case 0x1: + count = 128; + break; + case 0x2: + count = 256; + break; + default: + return -EINVAL; + } + + /* Reserve one last filter which lets all pass */ + priv->tc_entries_max = count; + priv->tc_entries = devm_kcalloc(priv->device, + count, sizeof(*priv->tc_entries), GFP_KERNEL); + if (!priv->tc_entries) + return -ENOMEM; + + tc_fill_all_pass_entry(&priv->tc_entries[count - 1]); + + dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n", + priv->tc_entries_max, priv->tc_off_max); + + return 0; +} + +static int tc_setup_cbs(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue = qopt->queue; + u32 ptr, speed_div; + u32 mode_to_use; + u64 value; + int ret; + + /* Queue 0 is not AVB capable */ + if (queue <= 0 || queue >= tx_queues_count) + return -EINVAL; + if (!priv->dma_cap.av) + return -EOPNOTSUPP; + + /* Port Transmit Rate and Speed Divider */ + switch (priv->speed) { + case SPEED_10000: + ptr = 32; + speed_div = 10000000; + break; + case SPEED_5000: + ptr = 32; + speed_div = 5000000; + break; + case SPEED_2500: + ptr = 8; + speed_div = 2500000; + break; + case SPEED_1000: + ptr = 8; + speed_div = 1000000; + break; + case SPEED_100: + ptr = 4; + speed_div = 100000; + break; + default: + return -EOPNOTSUPP; + } + + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + } else if (!qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, + MTL_QUEUE_DCB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; + } + + /* Final adjustments for HW */ + value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); + + value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); + + value = qopt->hicredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); + + value = qopt->locredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); + + ret = stmmac_config_cbs(priv, 
priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + if (ret) + return ret; + + dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n", + queue, qopt->sendslope, qopt->idleslope, + qopt->hicredit, qopt->locredit); + return 0; +} + +static int tc_parse_flow_actions(struct stmmac_priv *priv, + struct flow_action *action, + struct stmmac_flow_entry *entry, + struct netlink_ext_ack *extack) +{ + struct flow_action_entry *act; + int i; + + if (!flow_action_has_entries(action)) + return -EINVAL; + + if (!flow_action_basic_hw_stats_check(action, extack)) + return -EOPNOTSUPP; + + flow_action_for_each(i, act, action) { + switch (act->id) { + case FLOW_ACTION_DROP: + entry->action |= STMMAC_FLOW_ACTION_DROP; + return 0; + default: + break; + } + } + + /* Nothing to do, maybe inverse filter ? */ + return 0; +} + +#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0) + +static int tc_add_basic_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct flow_match_basic match; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) + return -EINVAL; + + flow_rule_match_basic(rule, &match); + + entry->ip_proto = match.key->ip_proto; + return 0; +} + +static int tc_add_ip4_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + bool inv = entry->action & STMMAC_FLOW_ACTION_DROP; + struct flow_match_ipv4_addrs match; + u32 hw_match; + int ret; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) + return -EINVAL; + + flow_rule_match_ipv4_addrs(rule, &match); + hw_match = ntohl(match.key->src) & ntohl(match.mask->src); + if (hw_match) { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, + false, true, inv, hw_match); + if (ret) + return ret; + } + + hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst); + if (hw_match) { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, + false, false, inv, hw_match); + if (ret) + return ret; + } + + return 0; +} + +static int tc_add_ports_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + bool inv = entry->action & STMMAC_FLOW_ACTION_DROP; + struct flow_match_ports match; + u32 hw_match; + bool is_udp; + int ret; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) + return -EINVAL; + + switch (entry->ip_proto) { + case IPPROTO_TCP: + is_udp = false; + break; + case IPPROTO_UDP: + is_udp = true; + break; + default: + return -EINVAL; + } + + flow_rule_match_ports(rule, &match); + + hw_match = ntohs(match.key->src) & ntohs(match.mask->src); + if (hw_match) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, + is_udp, true, inv, hw_match); + if (ret) + return ret; + } + + hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst); + if (hw_match) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, + is_udp, false, inv, 
hw_match); + if (ret) + return ret; + } + + entry->is_l4 = true; + return 0; +} + +static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + bool get_free) +{ + int i; + + for (i = 0; i < priv->flow_entries_max; i++) { + struct stmmac_flow_entry *entry = &priv->flow_entries[i]; + + if (entry->cookie == cls->cookie) + return entry; + if (get_free && (entry->in_use == false)) + return entry; + } + + return NULL; +} + +static struct { + int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry); +} tc_flow_parsers[] = { + { .fn = tc_add_basic_flow }, + { .fn = tc_add_ip4_flow }, + { .fn = tc_add_ports_flow }, +}; + +static int tc_add_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + int i, ret; + + if (!entry) { + entry = tc_find_flow(priv, cls, true); + if (!entry) + return -ENOENT; + } + + ret = tc_parse_flow_actions(priv, &rule->action, entry, + cls->common.extack); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) { + ret = tc_flow_parsers[i].fn(priv, cls, entry); + if (!ret) + entry->in_use = true; + } + + if (!entry->in_use) + return -EINVAL; + + entry->cookie = cls->cookie; + return 0; +} + +static int tc_del_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); + int ret; + + if (!entry || !entry->in_use) + return -ENOENT; + + if (entry->is_l4) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false, + false, false, false, 0); + } else { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false, + false, false, false, 0); + } + + entry->in_use = false; + entry->cookie = 0; + entry->is_l4 = false; + return ret; +} + +static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + bool get_free) +{ + int i; + + for (i = 0; i < priv->rfs_entries_total; i++) { + struct stmmac_rfs_entry *entry = &priv->rfs_entries[i]; + + if (entry->cookie == cls->cookie) + return entry; + if (get_free && entry->in_use == false) + return entry; + } + + return NULL; +} + +#define VLAN_PRIO_FULL_MASK (0x07) + +static int tc_add_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct flow_match_vlan match; + + if (!entry) { + entry = tc_find_rfs(priv, cls, true); + if (!entry) + return -ENOENT; + } + + if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >= + priv->rfs_entries_max[STMMAC_RFS_T_VLAN]) + return -ENOENT; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + flow_rule_match_vlan(rule, &match); + + if (match.mask->vlan_priority) { + u32 prio; + + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + prio = BIT(match.key->vlan_priority); + stmmac_rx_queue_prio(priv, priv->hw, prio, tc); + + entry->in_use = true; + entry->cookie = cls->cookie; + entry->tc = tc; + entry->type = STMMAC_RFS_T_VLAN; + 
priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++; + } + + return 0; +} + +static int tc_del_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + + if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN) + return -ENOENT; + + stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc); + + entry->in_use = false; + entry->cookie = 0; + entry->tc = 0; + entry->type = 0; + + priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--; + + return 0; +} + +static int tc_add_ethtype_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct flow_match_basic match; + + if (!entry) { + entry = tc_find_rfs(priv, cls, true); + if (!entry) + return -ENOENT; + } + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + flow_rule_match_basic(rule, &match); + + if (match.mask->n_proto) { + u16 etype = ntohs(match.key->n_proto); + + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for EthType filter"); + return -EINVAL; + } + switch (etype) { + case ETH_P_LLDP: + if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >= + priv->rfs_entries_max[STMMAC_RFS_T_LLDP]) + return -ENOENT; + + entry->type = STMMAC_RFS_T_LLDP; + priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++; + + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_DCBCPQ, tc); + break; + case ETH_P_1588: + if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >= + priv->rfs_entries_max[STMMAC_RFS_T_1588]) + return -ENOENT; + + entry->type = STMMAC_RFS_T_1588; + priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++; + + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_PTPQ, tc); + break; + default: + netdev_err(priv->dev, "EthType(0x%x) is not supported", etype); + return -EINVAL; + } + + entry->in_use = true; + entry->cookie = cls->cookie; + entry->tc = tc; + entry->etype = etype; + + return 0; + } + + return -EINVAL; +} + +static int tc_del_ethtype_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + + if (!entry || !entry->in_use || + entry->type < STMMAC_RFS_T_LLDP || + entry->type > STMMAC_RFS_T_1588) + return -ENOENT; + + switch (entry->etype) { + case ETH_P_LLDP: + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_DCBCPQ, 0); + priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--; + break; + case ETH_P_1588: + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_PTPQ, 0); + priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--; + break; + default: + netdev_err(priv->dev, "EthType(0x%x) is not supported", + entry->etype); + return -EINVAL; + } + + entry->in_use = false; + entry->cookie = 0; + entry->tc = 0; + entry->etype = 0; + entry->type = 0; + + return 0; +} + +static int tc_add_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_add_flow(priv, cls); + if (!ret) + return ret; + + ret = tc_add_ethtype_flow(priv, cls); + if (!ret) + return ret; + + return tc_add_vlan_flow(priv, cls); +} + +static int tc_del_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_del_flow(priv, cls); + if (!ret) + return ret; + + ret = 
tc_del_ethtype_flow(priv, cls); + if (!ret) + return ret; + + return tc_del_vlan_flow(priv, cls); +} + +static int tc_setup_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret = 0; + + /* When RSS is enabled, the filtering will be bypassed */ + if (priv->rss.enable) + return -EBUSY; + + switch (cls->command) { + case FLOW_CLS_REPLACE: + ret = tc_add_flow_cls(priv, cls); + break; + case FLOW_CLS_DESTROY: + ret = tc_del_flow_cls(priv, cls); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time) +{ + struct timespec64 time; + + if (ktime_after(old_base_time, current_time)) { + time = ktime_to_timespec64(old_base_time); + } else { + s64 n; + ktime_t base_time; + + n = div64_s64(ktime_sub_ns(current_time, old_base_time), + cycle_time); + base_time = ktime_add_ns(old_base_time, + (n + 1) * cycle_time); + + time = ktime_to_timespec64(base_time); + } + + return time; +} + +static int tc_setup_taprio(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) +{ + u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; + struct plat_stmmacenet_data *plat = priv->plat; + struct timespec64 time, current_time, qopt_time; + ktime_t current_time_ns; + bool fpe = false; + int i, ret = 0; + u64 ctr; + + if (!priv->dma_cap.estsel) + return -EOPNOTSUPP; + + switch (wid) { + case 0x1: + wid = 16; + break; + case 0x2: + wid = 20; + break; + case 0x3: + wid = 24; + break; + default: + return -EOPNOTSUPP; + } + + switch (dep) { + case 0x1: + dep = 64; + break; + case 0x2: + dep = 128; + break; + case 0x3: + dep = 256; + break; + case 0x4: + dep = 512; + break; + case 0x5: + dep = 1024; + break; + default: + return -EOPNOTSUPP; + } + + if (!qopt->enable) + goto disable; + if (qopt->num_entries >= dep) + return -EINVAL; + if (!qopt->cycle_time) + return -ERANGE; + + if (!plat->est) { + plat->est = devm_kzalloc(priv->device, sizeof(*plat->est), + GFP_KERNEL); + if (!plat->est) + return -ENOMEM; + + mutex_init(&priv->plat->est->lock); + } else { + memset(plat->est, 0, sizeof(*plat->est)); + } + + size = qopt->num_entries; + + mutex_lock(&priv->plat->est->lock); + priv->plat->est->gcl_size = size; + priv->plat->est->enable = qopt->enable; + mutex_unlock(&priv->plat->est->lock); + + for (i = 0; i < size; i++) { + s64 delta_ns = qopt->entries[i].interval; + u32 gates = qopt->entries[i].gate_mask; + + if (delta_ns > GENMASK(wid, 0)) + return -ERANGE; + if (gates > GENMASK(31 - wid, 0)) + return -ERANGE; + + switch (qopt->entries[i].command) { + case TC_TAPRIO_CMD_SET_GATES: + if (fpe) + return -EINVAL; + break; + case TC_TAPRIO_CMD_SET_AND_HOLD: + gates |= BIT(0); + fpe = true; + break; + case TC_TAPRIO_CMD_SET_AND_RELEASE: + gates &= ~BIT(0); + fpe = true; + break; + default: + return -EOPNOTSUPP; + } + + priv->plat->est->gcl[i] = delta_ns | (gates << wid); + } + + mutex_lock(&priv->plat->est->lock); + /* Adjust for real system time */ + priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); + current_time_ns = timespec64_to_ktime(current_time); + time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns, + qopt->cycle_time); + + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + + qopt_time = ktime_to_timespec64(qopt->base_time); + priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec; + priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec; + + ctr = qopt->cycle_time; + priv->plat->est->ctr[0] = 
do_div(ctr, NSEC_PER_SEC); + priv->plat->est->ctr[1] = (u32)ctr; + + if (fpe && !priv->dma_cap.fpesel) { + mutex_unlock(&priv->plat->est->lock); + return -EOPNOTSUPP; + } + + /* Actual FPE register configuration will be done after FPE handshake + * is success. + */ + priv->plat->fpe_cfg->enable = fpe; + + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + if (ret) { + netdev_err(priv->dev, "failed to configure EST\n"); + goto disable; + } + + netdev_info(priv->dev, "configured EST\n"); + + if (fpe) { + stmmac_fpe_handshake(priv, true); + netdev_info(priv->dev, "start FPE handshake\n"); + } + + return 0; + +disable: + if (priv->plat->est) { + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } + + priv->plat->fpe_cfg->enable = false; + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + false); + netdev_info(priv->dev, "disabled FPE\n"); + + stmmac_fpe_handshake(priv, false); + netdev_info(priv->dev, "stop FPE handshake\n"); + + return ret; +} + +static int tc_setup_etf(struct stmmac_priv *priv, + struct tc_etf_qopt_offload *qopt) +{ + if (!priv->dma_cap.tbssel) + return -EOPNOTSUPP; + if (qopt->queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) + return -EINVAL; + + if (qopt->enable) + priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; + else + priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; + + netdev_info(priv->dev, "%s ETF for Queue %d\n", + qopt->enable ? "enabled" : "disabled", qopt->queue); + return 0; +} + +const struct stmmac_tc_ops dwmac510_tc_ops = { + .init = tc_init, + .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, + .setup_cls = tc_setup_cls, + .setup_taprio = tc_setup_taprio, + .setup_etf = tc_setup_etf, +}; diff --git a/devices/stmmac/stmmac_tc-6.1-orig.c b/devices/stmmac/stmmac_tc-6.1-orig.c new file mode 100644 index 00000000..390c9008 --- /dev/null +++ b/devices/stmmac/stmmac_tc-6.1-orig.c @@ -0,0 +1,1115 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. 
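tc_setup_taprio() above packs each gate-control-list entry into a single word: the interval in nanoseconds is checked against GENMASK(wid, 0), the gate mask against GENMASK(31 - wid, 0), and the two are combined as interval | (gates << wid), where wid is 16, 20 or 24 depending on dma_cap.estwid. The same packing as a stand-alone sketch (hypothetical helper, user-space C):

#include <stdint.h>

/* Pack one EST gate control list entry the way tc_setup_taprio() does.
 * Returns -1 on a range violation (the driver returns -ERANGE). */
static int est_pack_gcl(uint64_t interval_ns, uint32_t gate_mask,
                        unsigned int wid, uint32_t *out)
{
        uint64_t interval_max = (1ULL << (wid + 1)) - 1;  /* GENMASK(wid, 0) */
        uint32_t gates_max = (1U << (32 - wid)) - 1;      /* GENMASK(31 - wid, 0) */

        if (interval_ns > interval_max || gate_mask > gates_max)
                return -1;

        *out = (uint32_t)interval_ns | (gate_mask << wid);
        return 0;
}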
+ * stmmac TC Handling (HW only) + */ + +#include +#include +#include "common.h" +#include "dwmac4.h" +#include "dwmac5.h" +#include "stmmac.h" + +static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry) +{ + memset(entry, 0, sizeof(*entry)); + entry->in_use = true; + entry->is_last = true; + entry->is_frag = false; + entry->prio = ~0x0; + entry->handle = 0; + entry->val.match_data = 0x0; + entry->val.match_en = 0x0; + entry->val.af = 1; + entry->val.dma_ch_no = 0x0; +} + +static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls, + bool free) +{ + struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL; + u32 loc = cls->knode.handle; + int i; + + for (i = 0; i < priv->tc_entries_max; i++) { + entry = &priv->tc_entries[i]; + if (!entry->in_use && !first && free) + first = entry; + if ((entry->handle == loc) && !free && !entry->is_frag) + dup = entry; + } + + if (dup) + return dup; + if (first) { + first->handle = loc; + first->in_use = true; + + /* Reset HW values */ + memset(&first->val, 0, sizeof(first->val)); + } + + return first; +} + +static int tc_fill_actions(struct stmmac_tc_entry *entry, + struct stmmac_tc_entry *frag, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *action_entry = entry; + const struct tc_action *act; + struct tcf_exts *exts; + int i; + + exts = cls->knode.exts; + if (!tcf_exts_has_actions(exts)) + return -EINVAL; + if (frag) + action_entry = frag; + + tcf_exts_for_each_action(i, act, exts) { + /* Accept */ + if (is_tcf_gact_ok(act)) { + action_entry->val.af = 1; + break; + } + /* Drop */ + if (is_tcf_gact_shot(act)) { + action_entry->val.rf = 1; + break; + } + + /* Unsupported */ + return -EINVAL; + } + + return 0; +} + +static int tc_fill_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *entry, *frag = NULL; + struct tc_u32_sel *sel = cls->knode.sel; + u32 off, data, mask, real_off, rem; + u32 prio = cls->common.prio << 16; + int ret; + + /* Only 1 match per entry */ + if (sel->nkeys <= 0 || sel->nkeys > 1) + return -EINVAL; + + off = sel->keys[0].off << sel->offshift; + data = sel->keys[0].val; + mask = sel->keys[0].mask; + + switch (ntohs(cls->common.protocol)) { + case ETH_P_ALL: + break; + case ETH_P_IP: + off += ETH_HLEN; + break; + default: + return -EINVAL; + } + + if (off > priv->tc_off_max) + return -EINVAL; + + real_off = off / 4; + rem = off % 4; + + entry = tc_find_entry(priv, cls, true); + if (!entry) + return -EINVAL; + + if (rem) { + frag = tc_find_entry(priv, cls, true); + if (!frag) { + ret = -EINVAL; + goto err_unuse; + } + + entry->frag_ptr = frag; + entry->val.match_en = (mask << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.match_data = (data << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.frame_offset = real_off; + entry->prio = prio; + + frag->val.match_en = (mask >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.match_data = (data >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.frame_offset = real_off + 1; + frag->prio = prio; + frag->is_frag = true; + } else { + entry->frag_ptr = NULL; + entry->val.match_en = mask; + entry->val.match_data = data; + entry->val.frame_offset = real_off; + entry->prio = prio; + } + + ret = tc_fill_actions(entry, frag, cls); + if (ret) + goto err_unuse; + + return 0; + +err_unuse: + if (frag) + frag->in_use = false; + entry->in_use = false; + return ret; +} + +static void tc_unfill_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry 
*entry; + + entry = tc_find_entry(priv, cls, false); + if (!entry) + return; + + entry->in_use = false; + if (entry->frag_ptr) { + entry = entry->frag_ptr; + entry->is_frag = false; + entry->in_use = false; + } +} + +static int tc_config_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + int ret; + + ret = tc_fill_entry(priv, cls); + if (ret) + return ret; + + ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); + if (ret) + goto err_unfill; + + return 0; + +err_unfill: + tc_unfill_entry(priv, cls); + return ret; +} + +static int tc_delete_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + /* Set entry and fragments as not used */ + tc_unfill_entry(priv, cls); + + return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); +} + +static int tc_setup_cls_u32(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + switch (cls->command) { + case TC_CLSU32_REPLACE_KNODE: + tc_unfill_entry(priv, cls); + fallthrough; + case TC_CLSU32_NEW_KNODE: + return tc_config_knode(priv, cls); + case TC_CLSU32_DELETE_KNODE: + return tc_delete_knode(priv, cls); + default: + return -EOPNOTSUPP; + } +} + +static int tc_rfs_init(struct stmmac_priv *priv) +{ + int i; + + priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8; + priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1; + priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1; + + for (i = 0; i < STMMAC_RFS_T_MAX; i++) + priv->rfs_entries_total += priv->rfs_entries_max[i]; + + priv->rfs_entries = devm_kcalloc(priv->device, + priv->rfs_entries_total, + sizeof(*priv->rfs_entries), + GFP_KERNEL); + if (!priv->rfs_entries) + return -ENOMEM; + + dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n", + priv->rfs_entries_total); + + return 0; +} + +static int tc_init(struct stmmac_priv *priv) +{ + struct dma_features *dma_cap = &priv->dma_cap; + unsigned int count; + int ret, i; + + if (dma_cap->l3l4fnum) { + priv->flow_entries_max = dma_cap->l3l4fnum; + priv->flow_entries = devm_kcalloc(priv->device, + dma_cap->l3l4fnum, + sizeof(*priv->flow_entries), + GFP_KERNEL); + if (!priv->flow_entries) + return -ENOMEM; + + for (i = 0; i < priv->flow_entries_max; i++) + priv->flow_entries[i].idx = i; + + dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n", + priv->flow_entries_max); + } + + ret = tc_rfs_init(priv); + if (ret) + return -ENOMEM; + + if (!priv->plat->fpe_cfg) { + priv->plat->fpe_cfg = devm_kzalloc(priv->device, + sizeof(*priv->plat->fpe_cfg), + GFP_KERNEL); + if (!priv->plat->fpe_cfg) + return -ENOMEM; + } else { + memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg)); + } + + /* Fail silently as we can still use remaining features, e.g. 
CBS */ + if (!dma_cap->frpsel) + return 0; + + switch (dma_cap->frpbs) { + case 0x0: + priv->tc_off_max = 64; + break; + case 0x1: + priv->tc_off_max = 128; + break; + case 0x2: + priv->tc_off_max = 256; + break; + default: + return -EINVAL; + } + + switch (dma_cap->frpes) { + case 0x0: + count = 64; + break; + case 0x1: + count = 128; + break; + case 0x2: + count = 256; + break; + default: + return -EINVAL; + } + + /* Reserve one last filter which lets all pass */ + priv->tc_entries_max = count; + priv->tc_entries = devm_kcalloc(priv->device, + count, sizeof(*priv->tc_entries), GFP_KERNEL); + if (!priv->tc_entries) + return -ENOMEM; + + tc_fill_all_pass_entry(&priv->tc_entries[count - 1]); + + dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n", + priv->tc_entries_max, priv->tc_off_max); + + return 0; +} + +static int tc_setup_cbs(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue = qopt->queue; + u32 ptr, speed_div; + u32 mode_to_use; + u64 value; + int ret; + + /* Queue 0 is not AVB capable */ + if (queue <= 0 || queue >= tx_queues_count) + return -EINVAL; + if (!priv->dma_cap.av) + return -EOPNOTSUPP; + + /* Port Transmit Rate and Speed Divider */ + switch (priv->speed) { + case SPEED_10000: + ptr = 32; + speed_div = 10000000; + break; + case SPEED_5000: + ptr = 32; + speed_div = 5000000; + break; + case SPEED_2500: + ptr = 8; + speed_div = 2500000; + break; + case SPEED_1000: + ptr = 8; + speed_div = 1000000; + break; + case SPEED_100: + ptr = 4; + speed_div = 100000; + break; + default: + return -EOPNOTSUPP; + } + + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + } else if (!qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, + MTL_QUEUE_DCB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; + } + + /* Final adjustments for HW */ + value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); + + value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); + + value = qopt->hicredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); + + value = qopt->locredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); + + ret = stmmac_config_cbs(priv, priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + if (ret) + return ret; + + dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n", + queue, qopt->sendslope, qopt->idleslope, + qopt->hicredit, qopt->locredit); + return 0; +} + +static int tc_parse_flow_actions(struct stmmac_priv *priv, + struct flow_action *action, + struct stmmac_flow_entry *entry, + struct netlink_ext_ack *extack) +{ + struct flow_action_entry *act; + int i; + + if (!flow_action_has_entries(action)) + return -EINVAL; + + if (!flow_action_basic_hw_stats_check(action, extack)) + return -EOPNOTSUPP; + + flow_action_for_each(i, act, action) { + switch (act->id) { + case FLOW_ACTION_DROP: + entry->action |= 
STMMAC_FLOW_ACTION_DROP; + return 0; + default: + break; + } + } + + /* Nothing to do, maybe inverse filter ? */ + return 0; +} + +#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0) + +static int tc_add_basic_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct flow_match_basic match; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) + return -EINVAL; + + flow_rule_match_basic(rule, &match); + + entry->ip_proto = match.key->ip_proto; + return 0; +} + +static int tc_add_ip4_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + bool inv = entry->action & STMMAC_FLOW_ACTION_DROP; + struct flow_match_ipv4_addrs match; + u32 hw_match; + int ret; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) + return -EINVAL; + + flow_rule_match_ipv4_addrs(rule, &match); + hw_match = ntohl(match.key->src) & ntohl(match.mask->src); + if (hw_match) { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, + false, true, inv, hw_match); + if (ret) + return ret; + } + + hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst); + if (hw_match) { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true, + false, false, inv, hw_match); + if (ret) + return ret; + } + + return 0; +} + +static int tc_add_ports_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + bool inv = entry->action & STMMAC_FLOW_ACTION_DROP; + struct flow_match_ports match; + u32 hw_match; + bool is_udp; + int ret; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) + return -EINVAL; + + switch (entry->ip_proto) { + case IPPROTO_TCP: + is_udp = false; + break; + case IPPROTO_UDP: + is_udp = true; + break; + default: + return -EINVAL; + } + + flow_rule_match_ports(rule, &match); + + hw_match = ntohs(match.key->src) & ntohs(match.mask->src); + if (hw_match) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, + is_udp, true, inv, hw_match); + if (ret) + return ret; + } + + hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst); + if (hw_match) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true, + is_udp, false, inv, hw_match); + if (ret) + return ret; + } + + entry->is_l4 = true; + return 0; +} + +static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + bool get_free) +{ + int i; + + for (i = 0; i < priv->flow_entries_max; i++) { + struct stmmac_flow_entry *entry = &priv->flow_entries[i]; + + if (entry->cookie == cls->cookie) + return entry; + if (get_free && (entry->in_use == false)) + return entry; + } + + return NULL; +} + +static struct { + int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls, + struct stmmac_flow_entry *entry); +} tc_flow_parsers[] = { + { .fn = tc_add_basic_flow }, + { .fn = tc_add_ip4_flow }, + { .fn = tc_add_ports_flow }, +}; + +static int tc_add_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); + 
struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + int i, ret; + + if (!entry) { + entry = tc_find_flow(priv, cls, true); + if (!entry) + return -ENOENT; + } + + ret = tc_parse_flow_actions(priv, &rule->action, entry, + cls->common.extack); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) { + ret = tc_flow_parsers[i].fn(priv, cls, entry); + if (!ret) + entry->in_use = true; + } + + if (!entry->in_use) + return -EINVAL; + + entry->cookie = cls->cookie; + return 0; +} + +static int tc_del_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false); + int ret; + + if (!entry || !entry->in_use) + return -ENOENT; + + if (entry->is_l4) { + ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false, + false, false, false, 0); + } else { + ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false, + false, false, false, 0); + } + + entry->in_use = false; + entry->cookie = 0; + entry->is_l4 = false; + return ret; +} + +static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + bool get_free) +{ + int i; + + for (i = 0; i < priv->rfs_entries_total; i++) { + struct stmmac_rfs_entry *entry = &priv->rfs_entries[i]; + + if (entry->cookie == cls->cookie) + return entry; + if (get_free && entry->in_use == false) + return entry; + } + + return NULL; +} + +#define VLAN_PRIO_FULL_MASK (0x07) + +static int tc_add_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct flow_match_vlan match; + + if (!entry) { + entry = tc_find_rfs(priv, cls, true); + if (!entry) + return -ENOENT; + } + + if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >= + priv->rfs_entries_max[STMMAC_RFS_T_VLAN]) + return -ENOENT; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + flow_rule_match_vlan(rule, &match); + + if (match.mask->vlan_priority) { + u32 prio; + + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + prio = BIT(match.key->vlan_priority); + stmmac_rx_queue_prio(priv, priv->hw, prio, tc); + + entry->in_use = true; + entry->cookie = cls->cookie; + entry->tc = tc; + entry->type = STMMAC_RFS_T_VLAN; + priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++; + } + + return 0; +} + +static int tc_del_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + + if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN) + return -ENOENT; + + stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc); + + entry->in_use = false; + entry->cookie = 0; + entry->tc = 0; + entry->type = 0; + + priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--; + + return 0; +} + +static int tc_add_ethtype_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct 
flow_match_basic match; + + if (!entry) { + entry = tc_find_rfs(priv, cls, true); + if (!entry) + return -ENOENT; + } + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + flow_rule_match_basic(rule, &match); + + if (match.mask->n_proto) { + u16 etype = ntohs(match.key->n_proto); + + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for EthType filter"); + return -EINVAL; + } + switch (etype) { + case ETH_P_LLDP: + if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >= + priv->rfs_entries_max[STMMAC_RFS_T_LLDP]) + return -ENOENT; + + entry->type = STMMAC_RFS_T_LLDP; + priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++; + + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_DCBCPQ, tc); + break; + case ETH_P_1588: + if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >= + priv->rfs_entries_max[STMMAC_RFS_T_1588]) + return -ENOENT; + + entry->type = STMMAC_RFS_T_1588; + priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++; + + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_PTPQ, tc); + break; + default: + netdev_err(priv->dev, "EthType(0x%x) is not supported", etype); + return -EINVAL; + } + + entry->in_use = true; + entry->cookie = cls->cookie; + entry->tc = tc; + entry->etype = etype; + + return 0; + } + + return -EINVAL; +} + +static int tc_del_ethtype_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + + if (!entry || !entry->in_use || + entry->type < STMMAC_RFS_T_LLDP || + entry->type > STMMAC_RFS_T_1588) + return -ENOENT; + + switch (entry->etype) { + case ETH_P_LLDP: + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_DCBCPQ, 0); + priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--; + break; + case ETH_P_1588: + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_PTPQ, 0); + priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--; + break; + default: + netdev_err(priv->dev, "EthType(0x%x) is not supported", + entry->etype); + return -EINVAL; + } + + entry->in_use = false; + entry->cookie = 0; + entry->tc = 0; + entry->etype = 0; + entry->type = 0; + + return 0; +} + +static int tc_add_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_add_flow(priv, cls); + if (!ret) + return ret; + + ret = tc_add_ethtype_flow(priv, cls); + if (!ret) + return ret; + + return tc_add_vlan_flow(priv, cls); +} + +static int tc_del_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_del_flow(priv, cls); + if (!ret) + return ret; + + ret = tc_del_ethtype_flow(priv, cls); + if (!ret) + return ret; + + return tc_del_vlan_flow(priv, cls); +} + +static int tc_setup_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret = 0; + + /* When RSS is enabled, the filtering will be bypassed */ + if (priv->rss.enable) + return -EBUSY; + + switch (cls->command) { + case FLOW_CLS_REPLACE: + ret = tc_add_flow_cls(priv, cls); + break; + case FLOW_CLS_DESTROY: + ret = tc_del_flow_cls(priv, cls); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time) +{ + struct timespec64 time; + + if (ktime_after(old_base_time, current_time)) { + time = ktime_to_timespec64(old_base_time); + } else { + s64 n; + ktime_t base_time; + + n = div64_s64(ktime_sub_ns(current_time, old_base_time), + 
cycle_time); + base_time = ktime_add_ns(old_base_time, + (n + 1) * cycle_time); + + time = ktime_to_timespec64(base_time); + } + + return time; +} + +static int tc_setup_taprio(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) +{ + u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; + struct plat_stmmacenet_data *plat = priv->plat; + struct timespec64 time, current_time, qopt_time; + ktime_t current_time_ns; + bool fpe = false; + int i, ret = 0; + u64 ctr; + + if (!priv->dma_cap.estsel) + return -EOPNOTSUPP; + + switch (wid) { + case 0x1: + wid = 16; + break; + case 0x2: + wid = 20; + break; + case 0x3: + wid = 24; + break; + default: + return -EOPNOTSUPP; + } + + switch (dep) { + case 0x1: + dep = 64; + break; + case 0x2: + dep = 128; + break; + case 0x3: + dep = 256; + break; + case 0x4: + dep = 512; + break; + case 0x5: + dep = 1024; + break; + default: + return -EOPNOTSUPP; + } + + if (!qopt->enable) + goto disable; + if (qopt->num_entries >= dep) + return -EINVAL; + if (!qopt->cycle_time) + return -ERANGE; + + if (!plat->est) { + plat->est = devm_kzalloc(priv->device, sizeof(*plat->est), + GFP_KERNEL); + if (!plat->est) + return -ENOMEM; + + mutex_init(&priv->plat->est->lock); + } else { + memset(plat->est, 0, sizeof(*plat->est)); + } + + size = qopt->num_entries; + + mutex_lock(&priv->plat->est->lock); + priv->plat->est->gcl_size = size; + priv->plat->est->enable = qopt->enable; + mutex_unlock(&priv->plat->est->lock); + + for (i = 0; i < size; i++) { + s64 delta_ns = qopt->entries[i].interval; + u32 gates = qopt->entries[i].gate_mask; + + if (delta_ns > GENMASK(wid, 0)) + return -ERANGE; + if (gates > GENMASK(31 - wid, 0)) + return -ERANGE; + + switch (qopt->entries[i].command) { + case TC_TAPRIO_CMD_SET_GATES: + if (fpe) + return -EINVAL; + break; + case TC_TAPRIO_CMD_SET_AND_HOLD: + gates |= BIT(0); + fpe = true; + break; + case TC_TAPRIO_CMD_SET_AND_RELEASE: + gates &= ~BIT(0); + fpe = true; + break; + default: + return -EOPNOTSUPP; + } + + priv->plat->est->gcl[i] = delta_ns | (gates << wid); + } + + mutex_lock(&priv->plat->est->lock); + /* Adjust for real system time */ + priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); + current_time_ns = timespec64_to_ktime(current_time); + time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns, + qopt->cycle_time); + + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + + qopt_time = ktime_to_timespec64(qopt->base_time); + priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec; + priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec; + + ctr = qopt->cycle_time; + priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); + priv->plat->est->ctr[1] = (u32)ctr; + + if (fpe && !priv->dma_cap.fpesel) { + mutex_unlock(&priv->plat->est->lock); + return -EOPNOTSUPP; + } + + /* Actual FPE register configuration will be done after FPE handshake + * is success. 
+ */ + priv->plat->fpe_cfg->enable = fpe; + + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + if (ret) { + netdev_err(priv->dev, "failed to configure EST\n"); + goto disable; + } + + netdev_info(priv->dev, "configured EST\n"); + + if (fpe) { + stmmac_fpe_handshake(priv, true); + netdev_info(priv->dev, "start FPE handshake\n"); + } + + return 0; + +disable: + if (priv->plat->est) { + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } + + priv->plat->fpe_cfg->enable = false; + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + false); + netdev_info(priv->dev, "disabled FPE\n"); + + stmmac_fpe_handshake(priv, false); + netdev_info(priv->dev, "stop FPE handshake\n"); + + return ret; +} + +static int tc_setup_etf(struct stmmac_priv *priv, + struct tc_etf_qopt_offload *qopt) +{ + if (!priv->dma_cap.tbssel) + return -EOPNOTSUPP; + if (qopt->queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) + return -EINVAL; + + if (qopt->enable) + priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; + else + priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; + + netdev_info(priv->dev, "%s ETF for Queue %d\n", + qopt->enable ? "enabled" : "disabled", qopt->queue); + return 0; +} + +const struct stmmac_tc_ops dwmac510_tc_ops = { + .init = tc_init, + .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, + .setup_cls = tc_setup_cls, + .setup_taprio = tc_setup_taprio, + .setup_etf = tc_setup_etf, +}; diff --git a/devices/stmmac/stmmac_xdp-6.1-ethercat.c b/devices/stmmac/stmmac_xdp-6.1-ethercat.c new file mode 100644 index 00000000..f3a40d65 --- /dev/null +++ b/devices/stmmac/stmmac_xdp-6.1-ethercat.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Intel Corporation. */ + +#include + +#include "stmmac-6.1-ethercat.h" +#include "stmmac_xdp-6.1-ethercat.h" + +static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, + struct xsk_buff_pool *pool, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + bool need_update; + u32 frame_size; + int err; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + /* XDP ZC does not span multiple frame, make sure XSK pool buffer + * size can at least store Q-in-Q frame. 
+ */ + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) + return -EOPNOTSUPP; + + err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR); + if (err) { + netdev_err(priv->dev, "Failed to map xsk pool\n"); + return err; + } + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + napi_disable(&ch->rx_napi); + napi_disable(&ch->tx_napi); + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + } + + set_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rxtx_napi); + + err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); + if (err) + return err; + } + + return 0; +} + +static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + struct xsk_buff_pool *pool; + bool need_update; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + pool = xsk_get_pool_from_qid(priv->dev, queue); + if (!pool) + return -EINVAL; + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + napi_disable(&ch->rxtx_napi); + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + synchronize_rcu(); + } + + xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); + + clear_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); + } + + return 0; +} + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue) +{ + return pool ? stmmac_xdp_enable_pool(priv, pool, queue) : + stmmac_xdp_disable_pool(priv, queue); +} + +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = priv->dev; + struct bpf_prog *old_prog; + bool need_update; + bool if_running; + + if_running = netif_running(dev); + + if (prog && dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + need_update = !!priv->xdp_prog != !!prog; + if (if_running && need_update) + stmmac_xdp_release(dev); + + old_prog = xchg(&priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + /* Disable RX SPH for XDP operation */ + priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); + + if (if_running && need_update) + stmmac_xdp_open(dev); + + return 0; +} diff --git a/devices/stmmac/stmmac_xdp-6.1-ethercat.h b/devices/stmmac/stmmac_xdp-6.1-ethercat.h new file mode 100644 index 00000000..896dc987 --- /dev/null +++ b/devices/stmmac/stmmac_xdp-6.1-ethercat.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. 
*/ + +#ifndef _STMMAC_XDP_H_ +#define _STMMAC_XDP_H_ + +#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM) +#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue); +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack); + +#endif /* _STMMAC_XDP_H_ */ diff --git a/devices/stmmac/stmmac_xdp-6.1-orig.c b/devices/stmmac/stmmac_xdp-6.1-orig.c new file mode 100644 index 00000000..9d4d8c3d --- /dev/null +++ b/devices/stmmac/stmmac_xdp-6.1-orig.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Intel Corporation. */ + +#include + +#include "stmmac.h" +#include "stmmac_xdp.h" + +static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, + struct xsk_buff_pool *pool, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + bool need_update; + u32 frame_size; + int err; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + /* XDP ZC does not span multiple frame, make sure XSK pool buffer + * size can at least store Q-in-Q frame. + */ + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) + return -EOPNOTSUPP; + + err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR); + if (err) { + netdev_err(priv->dev, "Failed to map xsk pool\n"); + return err; + } + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + napi_disable(&ch->rx_napi); + napi_disable(&ch->tx_napi); + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + } + + set_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rxtx_napi); + + err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); + if (err) + return err; + } + + return 0; +} + +static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + struct xsk_buff_pool *pool; + bool need_update; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + pool = xsk_get_pool_from_qid(priv->dev, queue); + if (!pool) + return -EINVAL; + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + napi_disable(&ch->rxtx_napi); + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + synchronize_rcu(); + } + + xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); + + clear_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); + } + + return 0; +} + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue) +{ + return pool ? stmmac_xdp_enable_pool(priv, pool, queue) : + stmmac_xdp_disable_pool(priv, queue); +} + +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = priv->dev; + struct bpf_prog *old_prog; + bool need_update; + bool if_running; + + if_running = netif_running(dev); + + if (prog && dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. 
+ */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + need_update = !!priv->xdp_prog != !!prog; + if (if_running && need_update) + stmmac_xdp_release(dev); + + old_prog = xchg(&priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + /* Disable RX SPH for XDP operation */ + priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); + + if (if_running && need_update) + stmmac_xdp_open(dev); + + return 0; +} diff --git a/devices/stmmac/stmmac_xdp-6.1-orig.h b/devices/stmmac/stmmac_xdp-6.1-orig.h new file mode 100644 index 00000000..896dc987 --- /dev/null +++ b/devices/stmmac/stmmac_xdp-6.1-orig.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. */ + +#ifndef _STMMAC_XDP_H_ +#define _STMMAC_XDP_H_ + +#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM) +#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue); +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack); + +#endif /* _STMMAC_XDP_H_ */ diff --git a/devices/stmmac/update.sh b/devices/stmmac/update.sh new file mode 100755 index 00000000..1e9c5070 --- /dev/null +++ b/devices/stmmac/update.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +if [ $# -ne 3 ]; then + echo "Need 3 arguments: 1) kernel source dir, 2) previous version, 3) version to add" + exit 1 +fi + +KERNELDIR=$1 +PREVER=$2 +KERNELVER=$3 + +IGBDIR=drivers/net/ethernet/stmicro/stmmac + +FILES="stmmac_main.c stmmac_ethtool.c stmmac_mdio.c ring_mode.c \ +chain_mode.c dwmac_lib.c dwmac1000_core.c dwmac1000_dma.c \ +dwmac100_core.c dwmac100_dma.c enh_desc.c norm_desc.c \ +mmc_core.c stmmac_hwtstamp.c stmmac_ptp.c dwmac4_descs.c \ +dwmac4_dma.c dwmac4_lib.c dwmac4_core.c dwmac5.c hwif.c \ +stmmac_tc.c dwxgmac2_core.c dwxgmac2_dma.c dwxgmac2_descs.c \ +stmmac_xdp.c \ +common.h \ +descs.h \ +descs_com.h \ +dwmac1000.h \ +dwmac100.h \ +dwmac4_descs.h \ +dwmac4_dma.h \ +dwmac4.h \ +dwmac5.h \ +dwmac_dma.h \ +dwxgmac2.h \ +dwxlgmac2.h \ +hwif.h \ +mmc.h \ +stmmac.h \ +stmmac_pcs.h \ +stmmac_ptp.h \ +stmmac_xdp.h \ +dwmac-intel.c \ +dwmac-intel.h \ +stmmac_pci.c" + +set -x + +for f in $FILES; do + echo $f + o=${f/\./-$KERNELVER-orig.} + e=${f/\./-$KERNELVER-ethercat.} + cp $KERNELDIR/$IGBDIR/$f $o + chmod 644 $o + cp $o $e + op=${f/\./-$PREVER-orig.} + ep=${f/\./-$PREVER-ethercat.} + diff -up $op $ep | patch -p1 $e + sed -i s/$PREVER-ethercat.h/$KERNELVER-ethercat.h/ $e + git add $o $e +done diff --git a/examples/dc_rtai/Kbuild.in b/examples/dc_rtai/Kbuild.in index 9a694720..03b7a54a 100644 --- a/examples/dc_rtai/Kbuild.in +++ b/examples/dc_rtai/Kbuild.in @@ -30,7 +30,7 @@ obj-m := ec_dc_rtai_sample.o ec_dc_rtai_sample-objs := dc_rtai_sample.o -EXTRA_CFLAGS := -I@RTAI_DIR@/include +EXTRA_CFLAGS := @RTAI_KERNEL_CFLAGS@ KBUILD_EXTRA_SYMBOLS := \ @abs_top_builddir@/$(LINUX_SYMVERS) \ diff --git a/examples/rtai/Kbuild.in b/examples/rtai/Kbuild.in index ac0621ad..20e8453e 100644 --- a/examples/rtai/Kbuild.in +++ b/examples/rtai/Kbuild.in @@ -30,7 +30,7 @@ obj-m := ec_rtai_sample.o ec_rtai_sample-objs := rtai_sample.o -EXTRA_CFLAGS := -I@RTAI_DIR@/include +EXTRA_CFLAGS := @RTAI_KERNEL_CFLAGS@ KBUILD_EXTRA_SYMBOLS := \ @abs_top_builddir@/$(LINUX_SYMVERS) \ diff --git a/include/ecrt.h b/include/ecrt.h index 7a67c27c..cc1f1d07 100644 --- a/include/ecrt.h +++ b/include/ecrt.h @@ -27,20 +27,16 @@ * \defgroup 
ApplicationInterface EtherCAT Configuration Interface * * Interface for configuring the EtherCAT master for realtime applications. - * There are functions to request a master, to map process data, - * to communicate with slaves via CoE - * and to configure and activate the bus. - * All methods in this group should be used before the - * application switches to operational (real-time) mode - * by calling ecrt_master_activate(). - * After that, only calls to the functions in \ref ApplicationInterfaceRT - * are allowed. - * The real-time operational mode finishes by calling ecrt_master_deactivate(). - * Many functions in this group are blocking and/or they - * acquire locks. - * Do not use these functions in non-Userspace contexts, - * e.g. in RTAI/Xenomai Real-Time tasks or in atomic/softirq/tasklet - * context in kernel modules. You have been warned. + * There are functions to request a master, to map process data, to + * communicate with slaves via CoE and to configure and activate the network. + * All methods in this group should be used before the application switches to + * operational (real-time) mode by calling ecrt_master_activate(). After + * that, only calls to the functions in \ref ApplicationInterfaceRT are + * allowed. The real-time operational mode finishes by calling + * ecrt_master_deactivate(). Many functions in this group are blocking and/or + * they acquire locks. Do not use these functions in non-userspace contexts, + * e. g. in RTAI/Xenomai Real-Time tasks or in atomic/softirq/tasklet context + * in kernel modules. You have been warned. * * Changes in version 1.6.0: * @@ -114,10 +110,10 @@ * ecrt_slave_config_sync_manager()). * - Added ecrt_slave_config_complete_sdo() method to download an SDO during * configuration via CompleteAccess. - * - Added ecrt_master_deactivate() to remove the bus configuration. + * - Added ecrt_master_deactivate() to remove the master configuration. * - Added ecrt_open_master() and ecrt_master_reserve() separation for * userspace. - * - Added bus information interface (methods ecrt_master(), + * - Added master information interface (methods ecrt_master(), * ecrt_master_get_slave(), ecrt_master_get_sync_manager(), * ecrt_master_get_pdo() and ecrt_master_get_pdo_entry()) to get information * about the currently connected slaves and the PDO entries provided. @@ -344,7 +340,7 @@ typedef struct { unsigned int al_states : 4; /**< Application-layer states of all slaves. The states are coded in the lower 4 bits. If a bit is set, it means that at least one - slave in the bus is in the corresponding + slave in the network is in the corresponding state: - Bit 0: \a INIT - Bit 1: \a PREOP @@ -366,10 +362,10 @@ typedef struct { unsigned int slaves_responding; /**< Sum of responding slaves on the given link. */ unsigned int al_states : 4; /**< Application-layer states of the slaves on - the given link. The states are coded in the - lower 4 bits. If a bit is set, it means - that at least one slave in the bus is in the - corresponding state: + the given link. The states are coded in the + lower 4 bits. If a bit is set, it means + that at least one slave in the network is in + the corresponding state: - Bit 0: \a INIT - Bit 1: \a PREOP - Bit 2: \a SAFEOP @@ -409,9 +405,10 @@ typedef struct { * \see ecrt_master(). */ typedef struct { - unsigned int slave_count; /**< Number of slaves in the bus. */ + unsigned int slave_count; /**< Number of slaves in the network. */ unsigned int link_up : 1; /**< \a true, if the network link is up. 
*/ - uint8_t scan_busy; /**< \a true, while the master is scanning the bus */ + uint8_t scan_busy; /**< \a true, while the master is scanning the network. + */ uint64_t app_time; /**< Application time. */ } ec_master_info_t; @@ -559,7 +556,7 @@ typedef struct { unsigned int n_entries; /**< Number of PDO entries in \a entries to map. Zero means, that the default mapping shall be used (this can only be done if the slave is - present at bus configuration time). */ + present at configuration time). */ ec_pdo_entry_info_t const *entries; /**< Array of PDO entries to map. Can either be \a NULL, or must contain at least \a n_entries values. */ @@ -721,14 +718,14 @@ EC_PUBLIC_API int ecrt_master_reserve( /** Sets the locking callbacks. * * For concurrent master access, i. e. if other instances than the application - * want to send and receive datagrams on the bus, the application has to + * want to send and receive datagrams on the network, the application has to * provide a callback mechanism. This method takes two function pointers as * its parameters. Asynchronous master access (like EoE processing) is only * possible if the callbacks have been set. * - * The task of the send callback (\a send_cb) is to decide, if the bus is - * currently accessible and whether or not to call the ecrt_master_send_ext() - * method. + * The task of the send callback (\a send_cb) is to decide, if the network + * hardware is currently accessible and whether or not to call the + * ecrt_master_send_ext() method. * * The task of the receive callback (\a receive_cb) is to decide, if a call to * ecrt_master_receive() is allowed and to execute it respectively. @@ -777,11 +774,11 @@ EC_PUBLIC_API ec_domain_t *ecrt_master_create_domain( * aliased slave, so a position of zero means the aliased slave itself and a * positive value matches the n-th slave behind the aliased one. * - * If the slave with the given address is found during the bus configuration, + * If the slave with the given address is found during the configuration, * its vendor ID and product code are matched against the given value. On * mismatch, the slave is not configured and an error message is raised. * - * If different slave configurations are pointing to the same slave during bus + * If different slave configurations are pointing to the same slave during * configuration, a warning is raised and only the first configuration is * applied. * @@ -1017,12 +1014,12 @@ EC_PUBLIC_API int ecrt_master_read_idn( * This function tells the master that the configuration phase is finished and * the realtime operation will begin. The function allocates internal memory * for the domains and calculates the logical FMMU addresses for domain - * members. It tells the master state machine that the bus configuration is - * now to be applied. + * members. It tells the master state machine that the configuration is + * now to be applied to the network. * * \attention After this function has been called, the realtime application is * in charge of cyclically calling ecrt_master_send() and - * ecrt_master_receive() to ensure bus communication. Before calling this + * ecrt_master_receive() to ensure network communication. Before calling this * function, the master thread is responsible for that, so these functions may * not be called! The method itself allocates memory and should not be called * in realtime context. @@ -1035,15 +1032,18 @@ EC_PUBLIC_API int ecrt_master_activate( /** Deactivates the master. * - * Removes the bus configuration. 
All objects created by + * Removes the master configuration. All objects created by * ecrt_master_create_domain(), ecrt_master_slave_config(), ecrt_domain_data() * ecrt_slave_config_create_sdo_request() and * ecrt_slave_config_create_voe_handler() are freed, so pointers to them * become invalid. * * This method should not be called in realtime context. + * \return 0 on success, otherwise negative error code. + * \retval 0 Success. + * \retval -EINVAL Master has not been activated before. */ -EC_PUBLIC_API void ecrt_master_deactivate( +EC_PUBLIC_API int ecrt_master_deactivate( ec_master_t *master /**< EtherCAT master. */ ); @@ -1071,8 +1071,9 @@ EC_PUBLIC_API int ecrt_master_set_send_interval( * has returned. * * \ingroup ApplicationInterfaceRT + * \return Zero on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_master_send( +EC_PUBLIC_API int ecrt_master_send( ec_master_t *master /**< EtherCAT master. */ ); @@ -1087,28 +1088,33 @@ EC_PUBLIC_API void ecrt_master_send( * ecrt_master_activate() has returned. * * \ingroup ApplicationInterfaceRT + * \return Zero on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_master_receive( +EC_PUBLIC_API int ecrt_master_receive( ec_master_t *master /**< EtherCAT master. */ ); +#ifdef __KERNEL__ /** Sends non-application datagrams. * * This method has to be called in the send callback function passed via * ecrt_master_callbacks() to allow the sending of non-application datagrams. + * \return Zero on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_master_send_ext( +int ecrt_master_send_ext( ec_master_t *master /**< EtherCAT master. */ ); +#endif /** Reads the current master state. * * Stores the master state information in the given \a state structure. * * This method returns a global state. For the link-specific states in a - * redundant bus topology, use the ecrt_master_link_state() method. + * redundant network topology, use the ecrt_master_link_state() method. + * \return Zero on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_master_state( +EC_PUBLIC_API int ecrt_master_state( const ec_master_t *master, /**< EtherCAT master. */ ec_master_state_t *state /**< Structure to store the information. */ ); @@ -1148,8 +1154,9 @@ EC_PUBLIC_API int ecrt_master_link_state( * necessary, since the absolute value is not of any interest. * * \ingroup ApplicationInterfaceRT + * \return Zero on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_master_application_time( +EC_PUBLIC_API int ecrt_master_application_time( ec_master_t *master, /**< EtherCAT master. */ uint64_t app_time /**< Application time. */ ); @@ -1160,8 +1167,11 @@ EC_PUBLIC_API void ecrt_master_application_time( * by the last call off ecrt_master_application_time(). * * \ingroup ApplicationInterfaceRT + * \return Zero on success, otherwise negative error code. + * \retval 0 Success. + * \retval -ENXIO No reference clock found. */ -EC_PUBLIC_API void ecrt_master_sync_reference_clock( +EC_PUBLIC_API int ecrt_master_sync_reference_clock( ec_master_t *master /**< EtherCAT master. */ ); @@ -1174,8 +1184,11 @@ EC_PUBLIC_API void ecrt_master_sync_reference_clock( * has returned. * * \ingroup ApplicationInterfaceRT + * \return Zero on success, otherwise negative error code. + * \retval 0 Success. + * \retval -ENXIO No reference clock found. */ -EC_PUBLIC_API void ecrt_master_sync_reference_clock_to( +EC_PUBLIC_API int ecrt_master_sync_reference_clock_to( ec_master_t *master, /**< EtherCAT master. 
*/ uint64_t sync_time /**< Sync reference clock to this time. */ ); @@ -1188,8 +1201,11 @@ EC_PUBLIC_API void ecrt_master_sync_reference_clock_to( * has returned. * * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. + * \retval 0 Success. + * \retval -ENXIO No reference clock found. */ -EC_PUBLIC_API void ecrt_master_sync_slave_clocks( +EC_PUBLIC_API int ecrt_master_sync_slave_clocks( ec_master_t *master /**< EtherCAT master. */ ); @@ -1205,6 +1221,10 @@ EC_PUBLIC_API void ecrt_master_sync_slave_clocks( * \attention The returned time is the system time of the reference clock * minus the transmission delay of the reference clock. * + * Calling this method makes only sense in realtime context (after master + * activation), when the ecrt_master_sync_slave_clocks() method is called + * cyclically. + * * \retval 0 success, system time was written into \a time. * \retval -ENXIO No reference clock found. * \retval -EIO Slave synchronization datagram was not received. @@ -1222,7 +1242,7 @@ EC_PUBLIC_API int ecrt_master_reference_clock_time( * * \ingroup ApplicationInterfaceRT */ -EC_PUBLIC_API void ecrt_master_sync_monitor_queue( +EC_PUBLIC_API int ecrt_master_sync_monitor_queue( ec_master_t *master /**< EtherCAT master. */ ); @@ -1247,9 +1267,13 @@ EC_PUBLIC_API uint32_t ecrt_master_sync_monitor_process( * done by the master. But with special slaves, that can be reconfigured by * the vendor during runtime, it can be useful. * + * Calling this method only makes sense in realtime context (after + * activation), because slaves will not be configured before. + * * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_master_reset( +EC_PUBLIC_API int ecrt_master_reset( ec_master_t *master /**< EtherCAT master. */ ); @@ -1279,8 +1303,10 @@ EC_PUBLIC_API int ecrt_slave_config_sync_manager( * * This method has to be called in non-realtime context before * ecrt_master_activate(). + * + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_slave_config_watchdog( +EC_PUBLIC_API int ecrt_slave_config_watchdog( ec_slave_config_t *sc, /**< Slave configuration. */ uint16_t watchdog_divider, /**< Number of 40 ns intervals (register 0x0400). Used as a base unit for all @@ -1318,8 +1344,9 @@ EC_PUBLIC_API int ecrt_slave_config_pdo_assign_add( * ecrt_master_activate(). * * \see ecrt_slave_config_pdos() + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_slave_config_pdo_assign_clear( +EC_PUBLIC_API int ecrt_slave_config_pdo_assign_clear( ec_slave_config_t *sc, /**< Slave configuration. */ uint8_t sync_index /**< Sync manager index. Must be less than #EC_MAX_SYNC_MANAGERS. */ @@ -1352,8 +1379,9 @@ EC_PUBLIC_API int ecrt_slave_config_pdo_mapping_add( * ecrt_master_activate(). * * \see ecrt_slave_config_pdos() + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_slave_config_pdo_mapping_clear( +EC_PUBLIC_API int ecrt_slave_config_pdo_mapping_clear( ec_slave_config_t *sc, /**< Slave configuration. */ uint16_t pdo_index /**< Index of the PDO. */ ); @@ -1500,8 +1528,9 @@ EC_PUBLIC_API int ecrt_slave_config_reg_pdo_entry_pos( * ecrt_master_activate(). * * \attention The \a sync1_shift time is ignored. + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_slave_config_dc( +EC_PUBLIC_API int ecrt_slave_config_dc( ec_slave_config_t *sc, /**< Slave configuration. 
*/ uint16_t assign_activate, /**< AssignActivate word. */ uint32_t sync0_cycle, /**< SYNC0 cycle time [ns]. */ @@ -1637,8 +1666,13 @@ EC_PUBLIC_API int ecrt_slave_config_emerg_size( * Byte 2: Error register * Byte 3-7: Data * + * Calling this method makes only sense in realtime context (after master + * activation). + * * \return 0 on success (record popped), or negative error code (i. e. * -ENOENT, if ring is empty). + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API int ecrt_slave_config_emerg_pop( ec_slave_config_t *sc, /**< Slave configuration. */ @@ -1647,8 +1681,13 @@ EC_PUBLIC_API int ecrt_slave_config_emerg_pop( ); /** Clears CoE emergency ring buffer and the overrun counter. + * + * Calling this method makes only sense in realtime context (after master + * activation). * * \return 0 on success, or negative error code. + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API int ecrt_slave_config_emerg_clear( ec_slave_config_t *sc /**< Slave configuration. */ @@ -1660,7 +1699,12 @@ EC_PUBLIC_API int ecrt_slave_config_emerg_clear( * not be stored in the ring buffer and had to be dropped. Call * ecrt_slave_config_emerg_clear() to reset the counter. * + * Calling this method makes only sense in realtime context (after master + * activation). + * * \return Number of overruns since last clear, or negative error code. + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API int ecrt_slave_config_emerg_overruns( const ec_slave_config_t *sc /**< Slave configuration. */ @@ -1747,8 +1791,13 @@ EC_PUBLIC_API ec_reg_request_t *ecrt_slave_config_create_reg_request( * * \attention If the state of process data exchange shall be monitored in * realtime, ecrt_domain_state() should be used. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT */ -EC_PUBLIC_API void ecrt_slave_config_state( +EC_PUBLIC_API int ecrt_slave_config_state( const ec_slave_config_t *sc, /**< Slave configuration */ ec_slave_config_state_t *state /**< State object to write to. */ ); @@ -1956,7 +2005,7 @@ void ecrt_domain_external_memory( * \return Pointer to the process data memory. */ EC_PUBLIC_API uint8_t *ecrt_domain_data( - ec_domain_t *domain /**< Domain. */ + const ec_domain_t *domain /**< Domain. */ ); /** Determines the states of the domain's datagrams. @@ -1967,8 +2016,9 @@ EC_PUBLIC_API uint8_t *ecrt_domain_data( * ecrt_domain_state() return the result of the last process data exchange. * * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_domain_process( +EC_PUBLIC_API int ecrt_domain_process( ec_domain_t *domain /**< Domain. */ ); @@ -1978,8 +2028,9 @@ EC_PUBLIC_API void ecrt_domain_process( * next call of ecrt_master_send(). * * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_domain_queue( +EC_PUBLIC_API int ecrt_domain_queue( ec_domain_t *domain /**< Domain. */ ); @@ -1990,8 +2041,9 @@ EC_PUBLIC_API void ecrt_domain_queue( * Using this method, the process data exchange can be monitored in realtime. * * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_domain_state( +EC_PUBLIC_API int ecrt_domain_state( const ec_domain_t *domain, /**< Domain. */ ec_domain_state_t *state /**< Pointer to a state object to store the information. 
*/ @@ -2006,8 +2058,15 @@ EC_PUBLIC_API void ecrt_domain_state( * \attention If the SDO index and/or subindex is changed while * ecrt_sdo_request_state() returns EC_REQUEST_BUSY, this may lead to * unexpected results. + * + * This method is meant to be called in realtime context (after master + * activation). To initialize the SDO request, the index and subindex can be + * set via ecrt_slave_config_create_sdo_request(). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_sdo_request_index( +EC_PUBLIC_API int ecrt_sdo_request_index( ec_sdo_request_t *req, /**< SDO request. */ uint16_t index, /**< SDO index. */ uint8_t subindex /**< SDO subindex. */ @@ -2020,8 +2079,12 @@ EC_PUBLIC_API void ecrt_sdo_request_index( * * The timeout is permanently stored in the request object and is valid until * the next call of this method. + * + * The timeout should be defined in non-realtime context, but can also be + * changed afterwards. + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_sdo_request_timeout( +EC_PUBLIC_API int ecrt_sdo_request_timeout( ec_sdo_request_t *req, /**< SDO request. */ uint32_t timeout /**< Timeout in milliseconds. Zero means no timeout. */ @@ -2048,7 +2111,12 @@ EC_PUBLIC_API void ecrt_sdo_request_timeout( * the internal SDO data memory could be re-allocated if the read SDO data do * not fit inside. * + * This method is meant to be called in realtime context (after master + * activation), but can also be used to initialize data before. + * * \return Pointer to the internal SDO data memory. + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API uint8_t *ecrt_sdo_request_data( const ec_sdo_request_t *req /**< SDO request. */ @@ -2060,32 +2128,51 @@ EC_PUBLIC_API uint8_t *ecrt_sdo_request_data( * reserved memory. After a read operation the size is set to the size of the * read data. The size is not modified in any other situation. * + * This method is meant to be called in realtime context (after master + * activation). + * * \return SDO data size in bytes. + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API size_t ecrt_sdo_request_data_size( const ec_sdo_request_t *req /**< SDO request. */ ); /** Get the current state of the SDO request. + * + * The user-space implementation fetches incoming data and stores the received + * data size in the request object, so the request is not const. + * + * This method is meant to be called in realtime context (after master + * activation). * * \return Request state. + * + * \ingroup ApplicationInterfaceRT */ -#ifdef __KERNEL__ -ec_request_state_t ecrt_sdo_request_state( - const ec_sdo_request_t *req /**< SDO request. */ - ); -#else EC_PUBLIC_API ec_request_state_t ecrt_sdo_request_state( - ec_sdo_request_t *req /**< SDO request. */ - ); +#ifdef __KERNEL__ + const #endif + ec_sdo_request_t *req /**< SDO request. */ + ); /** Schedule an SDO write operation. * * \attention This method may not be called while ecrt_sdo_request_state() * returns EC_REQUEST_BUSY. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. + * \retval -EINVAL Invalid input data, e.g. data size == 0. + * \retval -ENOBUFS Reserved memory in ecrt_slave_config_create_sdo_request() + * too small. */ -EC_PUBLIC_API void ecrt_sdo_request_write( +EC_PUBLIC_API int ecrt_sdo_request_write( ec_sdo_request_t *req /**< SDO request. 
*/ ); @@ -2097,8 +2184,14 @@ EC_PUBLIC_API void ecrt_sdo_request_write( * \attention After calling this function, the return value of * ecrt_sdo_request_data() must be considered as invalid while * ecrt_sdo_request_state() returns EC_REQUEST_BUSY. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_sdo_request_read( +EC_PUBLIC_API int ecrt_sdo_request_read( ec_sdo_request_t *req /**< SDO request. */ ); @@ -2111,8 +2204,15 @@ EC_PUBLIC_API void ecrt_sdo_request_read( * \attention If the drive number and/or IDN is changed while * ecrt_soe_request_state() returns EC_REQUEST_BUSY, this may lead to * unexpected results. + * + * This method is meant to be called in realtime context (after master + * activation). To initialize the SoE request, the drive_no and IDN can be + * set via ecrt_slave_config_create_soe_request(). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_soe_request_idn( +EC_PUBLIC_API int ecrt_soe_request_idn( ec_soe_request_t *req, /**< IDN request. */ uint8_t drive_no, /**< SDO index. */ uint16_t idn /**< SoE IDN. */ @@ -2125,8 +2225,12 @@ EC_PUBLIC_API void ecrt_soe_request_idn( * * The timeout is permanently stored in the request object and is valid until * the next call of this method. + * + * The timeout should be defined in non-realtime context, but can also be + * changed afterwards. + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_soe_request_timeout( +EC_PUBLIC_API int ecrt_soe_request_timeout( ec_soe_request_t *req, /**< SoE request. */ uint32_t timeout /**< Timeout in milliseconds. Zero means no timeout. */ @@ -2153,7 +2257,12 @@ EC_PUBLIC_API void ecrt_soe_request_timeout( * because the internal IDN data memory could be re-allocated if the read IDN * data do not fit inside. * + * This method is meant to be called in realtime context (after master + * activation), but can also be used to initialize data before. + * * \return Pointer to the internal IDN data memory. + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API uint8_t *ecrt_soe_request_data( const ec_soe_request_t *req /**< SoE request. */ @@ -2174,18 +2283,37 @@ EC_PUBLIC_API size_t ecrt_soe_request_data_size( /** Get the current state of the SoE request. * * \return Request state. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * In the user-space implementation, the method fetches the size of the + * incoming data, so the request object is not const. + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API ec_request_state_t ecrt_soe_request_state( - ec_soe_request_t *req /**< SoE request. Not const, because the - internal data size may be updated. */ - ); +#ifdef __KERNEL__ + const +#endif + ec_soe_request_t *req /**< SoE request. */ + ); /** Schedule an SoE IDN write operation. * * \attention This method may not be called while ecrt_soe_request_state() * returns EC_REQUEST_BUSY. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. + * \retval -EINVAL Invalid input data, e.g. data size == 0. + * \retval -ENOBUFS Reserved memory in ecrt_slave_config_create_soe_request() + * too small. 
*/ -EC_PUBLIC_API void ecrt_soe_request_write( +EC_PUBLIC_API int ecrt_soe_request_write( ec_soe_request_t *req /**< SoE request. */ ); @@ -2197,8 +2325,14 @@ EC_PUBLIC_API void ecrt_soe_request_write( * \attention After calling this function, the return value of * ecrt_soe_request_data() must be considered as invalid while * ecrt_soe_request_state() returns EC_REQUEST_BUSY. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_soe_request_read( +EC_PUBLIC_API int ecrt_soe_request_read( ec_soe_request_t *req /**< SoE request. */ ); @@ -2212,8 +2346,15 @@ EC_PUBLIC_API void ecrt_soe_request_read( * type at as header. These numbers can be set with this function. The values * are valid and will be used for future send operations until the next call * of this method. + * + * This method is meant to be called in non-realtime context (before master + * activation) to initialize the header data, but it is also safe to + * change the header later on in realtime context. + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_voe_handler_send_header( +EC_PUBLIC_API int ecrt_voe_handler_send_header( ec_voe_handler_t *voe, /**< VoE handler. */ uint32_t vendor_id, /**< Vendor ID. */ uint16_t vendor_type /**< Vendor-specific type. */ @@ -2226,8 +2367,14 @@ EC_PUBLIC_API void ecrt_voe_handler_send_header( * * The header information is stored at the memory given by the pointer * parameters. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_voe_handler_received_header( +EC_PUBLIC_API int ecrt_voe_handler_received_header( const ec_voe_handler_t *voe, /**< VoE handler. */ uint32_t *vendor_id, /**< Vendor ID. */ uint16_t *vendor_type /**< Vendor-specific type. */ @@ -2276,10 +2423,19 @@ EC_PUBLIC_API size_t ecrt_voe_handler_data_size( /** Start a VoE write operation. * * After this function has been called, the ecrt_voe_handler_execute() method - * must be called in every bus cycle as long as it returns EC_REQUEST_BUSY. No - * other operation may be started while the handler is busy. + * must be called in every realtime cycle as long as it returns + * EC_REQUEST_BUSY. No other operation may be started while the handler is + * busy. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. + * \retval -ENOBUFS Reserved memory in ecrt_slave_config_create_voe_handler + * too small. */ -EC_PUBLIC_API void ecrt_voe_handler_write( +EC_PUBLIC_API int ecrt_voe_handler_write( ec_voe_handler_t *voe, /**< VoE handler. */ size_t size /**< Number of bytes to write (without the VoE header). */ ); @@ -2287,8 +2443,9 @@ EC_PUBLIC_API void ecrt_voe_handler_write( /** Start a VoE read operation. * * After this function has been called, the ecrt_voe_handler_execute() method - * must be called in every bus cycle as long as it returns EC_REQUEST_BUSY. No - * other operation may be started while the handler is busy. + * must be called in every realtime cycle as long as it returns + * EC_REQUEST_BUSY. No other operation may be started while the handler is + * busy. 
* * The state machine queries the slave's send mailbox for new data to be send * to the master. If no data appear within the EC_VOE_RESPONSE_TIMEOUT @@ -2297,16 +2454,23 @@ EC_PUBLIC_API void ecrt_voe_handler_write( * On success, the size of the read data can be determined via * ecrt_voe_handler_data_size(), while the VoE header of the received data * can be retrieved with ecrt_voe_handler_received_header(). + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_voe_handler_read( +EC_PUBLIC_API int ecrt_voe_handler_read( ec_voe_handler_t *voe /**< VoE handler. */ ); /** Start a VoE read operation without querying the sync manager status. * * After this function has been called, the ecrt_voe_handler_execute() method - * must be called in every bus cycle as long as it returns EC_REQUEST_BUSY. No - * other operation may be started while the handler is busy. + * must be called in every realtime cycle as long as it returns + * EC_REQUEST_BUSY. No other operation may be started while the handler is + * busy. * * The state machine queries the slave by sending an empty mailbox. The slave * fills its data to the master in this mailbox. If no data appear within the @@ -2316,21 +2480,32 @@ EC_PUBLIC_API void ecrt_voe_handler_read( * On success, the size of the read data can be determined via * ecrt_voe_handler_data_size(), while the VoE header of the received data * can be retrieved with ecrt_voe_handler_received_header(). + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. */ -EC_PUBLIC_API void ecrt_voe_handler_read_nosync( +EC_PUBLIC_API int ecrt_voe_handler_read_nosync( ec_voe_handler_t *voe /**< VoE handler. */ ); /** Execute the handler. * - * This method executes the VoE handler. It has to be called in every bus + * This method executes the VoE handler. It has to be called in every realtime * cycle as long as it returns EC_REQUEST_BUSY. * * \return Handler state. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API ec_request_state_t ecrt_voe_handler_execute( - ec_voe_handler_t *voe /**< VoE handler. */ - ); + ec_voe_handler_t *voe /**< VoE handler. */ + ); /***************************************************************************** * Register request methods. @@ -2353,17 +2528,27 @@ EC_PUBLIC_API ec_request_state_t ecrt_voe_handler_execute( * EC_WRITE_U16(ecrt_reg_request_data(reg_request), 0xFFFF); * \endcode * + * This method is meant to be called in realtime context (after master + * activation), but can also be used to initialize data before. + * * \return Pointer to the internal memory. + * + * \ingroup ApplicationInterfaceRT */ EC_PUBLIC_API uint8_t *ecrt_reg_request_data( const ec_reg_request_t *req /**< Register request. */ ); /** Get the current state of the register request. + * + * This method is meant to be called in realtime context (after master + * activation). * * \return Request state. + * + * \ingroup ApplicationInterfaceRT */ -ec_request_state_t ecrt_reg_request_state( +EC_PUBLIC_API ec_request_state_t ecrt_reg_request_state( const ec_reg_request_t *req /**< Register request. 
*/ ); @@ -2374,8 +2559,16 @@ ec_request_state_t ecrt_reg_request_state( * * \attention The \a size parameter is truncated to the size given at request * creation. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. + * \retval -ENOBUFS Reserved memory in ecrt_slave_config_create_reg_request + * too small. */ -EC_PUBLIC_API void ecrt_reg_request_write( +EC_PUBLIC_API int ecrt_reg_request_write( ec_reg_request_t *req, /**< Register request. */ uint16_t address, /**< Register address. */ size_t size /**< Size to write. */ @@ -2388,8 +2581,16 @@ EC_PUBLIC_API void ecrt_reg_request_write( * * \attention The \a size parameter is truncated to the size given at request * creation. + * + * This method is meant to be called in realtime context (after master + * activation). + * + * \ingroup ApplicationInterfaceRT + * \return 0 on success, otherwise negative error code. + * \retval -ENOBUFS Reserved memory in ecrt_slave_config_create_reg_request + * too small. */ -EC_PUBLIC_API void ecrt_reg_request_read( +EC_PUBLIC_API int ecrt_reg_request_read( ec_reg_request_t *req, /**< Register request. */ uint16_t address, /**< Register address. */ size_t size /**< Size to write. */ diff --git a/lib/domain.c b/lib/domain.c index 26d01262..7382a24b 100644 --- a/lib/domain.c +++ b/lib/domain.c @@ -83,54 +83,40 @@ size_t ecrt_domain_size(const ec_domain_t *domain) /****************************************************************************/ -uint8_t *ecrt_domain_data(ec_domain_t *domain) +uint8_t *ecrt_domain_data(const ec_domain_t *domain) { - if (!domain->process_data) { - int offset = 0; - - offset = ioctl(domain->master->fd, EC_IOCTL_DOMAIN_OFFSET, - domain->index); - if (EC_IOCTL_IS_ERROR(offset)) { - fprintf(stderr, "Failed to get domain offset: %s\n", - strerror(EC_IOCTL_ERRNO(offset))); - return NULL; - } - - domain->process_data = domain->master->process_data + offset; - } - return domain->process_data; } /****************************************************************************/ -void ecrt_domain_process(ec_domain_t *domain) +int ecrt_domain_process(ec_domain_t *domain) { int ret; ret = ioctl(domain->master->fd, EC_IOCTL_DOMAIN_PROCESS, domain->index); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to process domain: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_domain_queue(ec_domain_t *domain) +int ecrt_domain_queue(ec_domain_t *domain) { int ret; ret = ioctl(domain->master->fd, EC_IOCTL_DOMAIN_QUEUE, domain->index); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to queue domain: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_domain_state(const ec_domain_t *domain, ec_domain_state_t *state) +int ecrt_domain_state(const ec_domain_t *domain, ec_domain_state_t *state) { ec_ioctl_domain_state_t data; int ret; @@ -140,9 +126,9 @@ void ecrt_domain_state(const ec_domain_t *domain, ec_domain_state_t *state) ret = ioctl(domain->master->fd, EC_IOCTL_DOMAIN_STATE, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to get domain state: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } 
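/* Illustrative usage sketch, not part of this patch: with the int return
 * values introduced above, a user-space cyclic task can propagate I/O errors
 * to its caller instead of relying on messages printed to stderr. The names
 * cyclic_task, master and domain1 are placeholders for this example.
 */
#include "ecrt.h"

static int cyclic_task(ec_master_t *master, ec_domain_t *domain1)
{
    int ret;

    if ((ret = ecrt_master_receive(master)))
        return ret;             /* e.g. -EINTR if the ioctl was interrupted */
    if ((ret = ecrt_domain_process(domain1)))
        return ret;

    /* ... exchange process data via ecrt_domain_data(domain1) ... */

    if ((ret = ecrt_domain_queue(domain1)))
        return ret;
    return ecrt_master_send(master);
}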
/****************************************************************************/ diff --git a/lib/master.c b/lib/master.c index 25dd2279..af19365c 100644 --- a/lib/master.c +++ b/lib/master.c @@ -592,23 +592,37 @@ int ecrt_master_activate(ec_master_t *master) master->process_data[0] = 0x00; } + // pick up process data pointers for all created domains + ec_domain_t *domain = master->first_domain; + while (domain) { + int offset = ioctl(domain->master->fd, EC_IOCTL_DOMAIN_OFFSET, + domain->index); + if (EC_IOCTL_IS_ERROR(offset)) { + fprintf(stderr, "Failed to get domain offset: %s\n", + strerror(EC_IOCTL_ERRNO(offset))); + return -EC_IOCTL_ERRNO(offset); + } + + domain->process_data = master->process_data + offset; + domain = domain->next; + } + return 0; } /****************************************************************************/ -void ecrt_master_deactivate(ec_master_t *master) +int ecrt_master_deactivate(ec_master_t *master) { int ret; ret = ioctl(master->fd, EC_IOCTL_DEACTIVATE, NULL); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to deactivate master: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); - return; + return -EC_IOCTL_ERRNO(ret); } ec_master_clear_config(master); + return 0; } /****************************************************************************/ @@ -620,8 +634,6 @@ int ecrt_master_set_send_interval(ec_master_t *master, ret = ioctl(master->fd, EC_IOCTL_SET_SEND_INTERVAL, &send_interval_us); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set send interval: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); return -EC_IOCTL_ERRNO(ret); } @@ -630,41 +642,41 @@ int ecrt_master_set_send_interval(ec_master_t *master, /****************************************************************************/ -void ecrt_master_send(ec_master_t *master) +int ecrt_master_send(ec_master_t *master) { int ret; ret = ioctl(master->fd, EC_IOCTL_SEND, NULL); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to send: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_master_receive(ec_master_t *master) +int ecrt_master_receive(ec_master_t *master) { int ret; ret = ioctl(master->fd, EC_IOCTL_RECEIVE, NULL); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to receive: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_master_state(const ec_master_t *master, ec_master_state_t *state) +int ecrt_master_state(const ec_master_t *master, ec_master_state_t *state) { int ret; ret = ioctl(master->fd, EC_IOCTL_MASTER_STATE, state); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to get master state: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -690,7 +702,7 @@ int ecrt_master_link_state(const ec_master_t *master, unsigned int dev_idx, /****************************************************************************/ -void ecrt_master_application_time(ec_master_t *master, uint64_t app_time) +int ecrt_master_application_time(ec_master_t *master, uint64_t app_time) { uint64_t time; int ret; @@ -699,27 +711,27 @@ void ecrt_master_application_time(ec_master_t *master, uint64_t app_time) ret = ioctl(master->fd, EC_IOCTL_APP_TIME, &time); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set application time: 
%s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_master_sync_reference_clock(ec_master_t *master) +int ecrt_master_sync_reference_clock(ec_master_t *master) { int ret; ret = ioctl(master->fd, EC_IOCTL_SYNC_REF, NULL); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to sync reference clock: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_master_sync_reference_clock_to(ec_master_t *master, +int ecrt_master_sync_reference_clock_to(ec_master_t *master, uint64_t sync_time) { uint64_t time; @@ -729,22 +741,22 @@ void ecrt_master_sync_reference_clock_to(ec_master_t *master, ret = ioctl(master->fd, EC_IOCTL_SYNC_REF_TO, &time); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to sync reference clock: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_master_sync_slave_clocks(ec_master_t *master) +int ecrt_master_sync_slave_clocks(ec_master_t *master) { int ret; ret = ioctl(master->fd, EC_IOCTL_SYNC_SLAVES, NULL); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to sync slave clocks: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -758,6 +770,7 @@ int ecrt_master_reference_clock_time(const ec_master_t *master, if (EC_IOCTL_IS_ERROR(ret)) { fprintf(stderr, "Failed to get reference clock time: %s\n", strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } return ret; @@ -765,15 +778,15 @@ int ecrt_master_reference_clock_time(const ec_master_t *master, /****************************************************************************/ -void ecrt_master_sync_monitor_queue(ec_master_t *master) +int ecrt_master_sync_monitor_queue(ec_master_t *master) { int ret; ret = ioctl(master->fd, EC_IOCTL_SYNC_MON_QUEUE, NULL); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to queue sync monitor datagram: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -795,15 +808,15 @@ uint32_t ecrt_master_sync_monitor_process(const ec_master_t *master) /****************************************************************************/ -void ecrt_master_reset(ec_master_t *master) +int ecrt_master_reset(ec_master_t *master) { int ret; ret = ioctl(master->fd, EC_IOCTL_RESET, NULL); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to reset master: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ diff --git a/lib/reg_request.c b/lib/reg_request.c index 6fb04c98..f52afd15 100644 --- a/lib/reg_request.c +++ b/lib/reg_request.c @@ -89,7 +89,7 @@ ec_request_state_t ecrt_reg_request_state(const ec_reg_request_t *reg) /****************************************************************************/ -void ecrt_reg_request_write(ec_reg_request_t *reg, uint16_t address, +int ecrt_reg_request_write(ec_reg_request_t *reg, uint16_t address, size_t size) { ec_ioctl_reg_request_t io; @@ -103,14 +103,14 @@ void ecrt_reg_request_write(ec_reg_request_t *reg, uint16_t address, ret = 
ioctl(reg->config->master->fd, EC_IOCTL_REG_REQUEST_WRITE, &io); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to command an register write operation: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_reg_request_read(ec_reg_request_t *reg, uint16_t address, +int ecrt_reg_request_read(ec_reg_request_t *reg, uint16_t address, size_t size) { ec_ioctl_reg_request_t io; @@ -123,9 +123,9 @@ void ecrt_reg_request_read(ec_reg_request_t *reg, uint16_t address, ret = ioctl(reg->config->master->fd, EC_IOCTL_REG_REQUEST_READ, &io); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to command an register read operation: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ diff --git a/lib/sdo_request.c b/lib/sdo_request.c index 6271f642..509a349c 100644 --- a/lib/sdo_request.c +++ b/lib/sdo_request.c @@ -48,7 +48,7 @@ void ec_sdo_request_clear(ec_sdo_request_t *req) * Application interface. ****************************************************************************/ -void ecrt_sdo_request_index(ec_sdo_request_t *req, uint16_t index, +int ecrt_sdo_request_index(ec_sdo_request_t *req, uint16_t index, uint8_t subindex) { ec_ioctl_sdo_request_t data; @@ -61,14 +61,14 @@ void ecrt_sdo_request_index(ec_sdo_request_t *req, uint16_t index, ret = ioctl(req->config->master->fd, EC_IOCTL_SDO_REQUEST_INDEX, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set SDO request index/subindex: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_sdo_request_timeout(ec_sdo_request_t *req, uint32_t timeout) +int ecrt_sdo_request_timeout(ec_sdo_request_t *req, uint32_t timeout) { ec_ioctl_sdo_request_t data; int ret; @@ -79,9 +79,9 @@ void ecrt_sdo_request_timeout(ec_sdo_request_t *req, uint32_t timeout) ret = ioctl(req->config->master->fd, EC_IOCTL_SDO_REQUEST_TIMEOUT, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set SDO request timeout: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -139,7 +139,7 @@ ec_request_state_t ecrt_sdo_request_state(ec_sdo_request_t *req) /****************************************************************************/ -void ecrt_sdo_request_read(ec_sdo_request_t *req) +int ecrt_sdo_request_read(ec_sdo_request_t *req) { ec_ioctl_sdo_request_t data; int ret; @@ -149,14 +149,14 @@ void ecrt_sdo_request_read(ec_sdo_request_t *req) ret = ioctl(req->config->master->fd, EC_IOCTL_SDO_REQUEST_READ, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to command an SDO read operation : %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_sdo_request_write(ec_sdo_request_t *req) +int ecrt_sdo_request_write(ec_sdo_request_t *req) { ec_ioctl_sdo_request_t data; int ret; @@ -168,9 +168,9 @@ void ecrt_sdo_request_write(ec_sdo_request_t *req) ret = ioctl(req->config->master->fd, EC_IOCTL_SDO_REQUEST_WRITE, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to command an SDO write operation : %s\n", - strerror(EC_IOCTL_ERRNO(ret))); 
+ return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ diff --git a/lib/slave_config.c b/lib/slave_config.c index 23223ecd..8e68ed2d 100644 --- a/lib/slave_config.c +++ b/lib/slave_config.c @@ -116,7 +116,7 @@ int ecrt_slave_config_sync_manager(ec_slave_config_t *sc, uint8_t sync_index, /****************************************************************************/ -void ecrt_slave_config_watchdog(ec_slave_config_t *sc, +int ecrt_slave_config_watchdog(ec_slave_config_t *sc, uint16_t divider, uint16_t intervals) { ec_ioctl_config_t data; @@ -131,7 +131,9 @@ void ecrt_slave_config_watchdog(ec_slave_config_t *sc, if (EC_IOCTL_IS_ERROR(ret)) { fprintf(stderr, "Failed to config watchdog: %s\n", strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -158,7 +160,7 @@ int ecrt_slave_config_pdo_assign_add(ec_slave_config_t *sc, /****************************************************************************/ -void ecrt_slave_config_pdo_assign_clear(ec_slave_config_t *sc, +int ecrt_slave_config_pdo_assign_clear(ec_slave_config_t *sc, uint8_t sync_index) { ec_ioctl_config_pdo_t data; @@ -171,7 +173,9 @@ void ecrt_slave_config_pdo_assign_clear(ec_slave_config_t *sc, if (EC_IOCTL_IS_ERROR(ret)) { fprintf(stderr, "Failed to clear PDOs: %s\n", strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -201,7 +205,7 @@ int ecrt_slave_config_pdo_mapping_add(ec_slave_config_t *sc, /****************************************************************************/ -void ecrt_slave_config_pdo_mapping_clear(ec_slave_config_t *sc, +int ecrt_slave_config_pdo_mapping_clear(ec_slave_config_t *sc, uint16_t pdo_index) { ec_ioctl_config_pdo_t data; @@ -214,7 +218,9 @@ void ecrt_slave_config_pdo_mapping_clear(ec_slave_config_t *sc, if (EC_IOCTL_IS_ERROR(ret)) { fprintf(stderr, "Failed to clear PDO entries: %s\n", strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -363,7 +369,7 @@ int ecrt_slave_config_reg_pdo_entry_pos( /****************************************************************************/ -void ecrt_slave_config_dc(ec_slave_config_t *sc, uint16_t assign_activate, +int ecrt_slave_config_dc(ec_slave_config_t *sc, uint16_t assign_activate, uint32_t sync0_cycle_time, int32_t sync0_shift_time, uint32_t sync1_cycle_time, int32_t sync1_shift_time) { @@ -379,9 +385,9 @@ void ecrt_slave_config_dc(ec_slave_config_t *sc, uint16_t assign_activate, ret = ioctl(sc->master->fd, EC_IOCTL_SC_DC, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set DC parameters: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -820,7 +826,7 @@ ec_voe_handler_t *ecrt_slave_config_create_voe_handler(ec_slave_config_t *sc, /****************************************************************************/ -void ecrt_slave_config_state(const ec_slave_config_t *sc, +int ecrt_slave_config_state(const ec_slave_config_t *sc, ec_slave_config_state_t *state) { ec_ioctl_sc_state_t data; @@ -831,9 +837,9 @@ void ecrt_slave_config_state(const ec_slave_config_t *sc, ret = ioctl(sc->master->fd, EC_IOCTL_SC_STATE, &data); if (EC_IOCTL_IS_ERROR(ret)) { - 
fprintf(stderr, "Failed to get slave configuration state: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ diff --git a/lib/soe_request.c b/lib/soe_request.c index d95a4ef1..822e3c1e 100644 --- a/lib/soe_request.c +++ b/lib/soe_request.c @@ -48,7 +48,7 @@ void ec_soe_request_clear(ec_soe_request_t *req) * Application interface. ****************************************************************************/ -void ecrt_soe_request_idn(ec_soe_request_t *req, uint8_t drive_no, +int ecrt_soe_request_idn(ec_soe_request_t *req, uint8_t drive_no, uint16_t idn) { ec_ioctl_soe_request_t data; @@ -61,14 +61,14 @@ void ecrt_soe_request_idn(ec_soe_request_t *req, uint8_t drive_no, ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_IDN, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set SoE request IDN: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_soe_request_timeout(ec_soe_request_t *req, uint32_t timeout) +int ecrt_soe_request_timeout(ec_soe_request_t *req, uint32_t timeout) { ec_ioctl_soe_request_t data; int ret; @@ -79,9 +79,9 @@ void ecrt_soe_request_timeout(ec_soe_request_t *req, uint32_t timeout) ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_TIMEOUT, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set SoE request timeout: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -139,7 +139,7 @@ ec_request_state_t ecrt_soe_request_state(ec_soe_request_t *req) /****************************************************************************/ -void ecrt_soe_request_read(ec_soe_request_t *req) +int ecrt_soe_request_read(ec_soe_request_t *req) { ec_ioctl_soe_request_t data; int ret; @@ -149,14 +149,14 @@ void ecrt_soe_request_read(ec_soe_request_t *req) ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_READ, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to command an SoE read operation : %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_soe_request_write(ec_soe_request_t *req) +int ecrt_soe_request_write(ec_soe_request_t *req) { ec_ioctl_soe_request_t data; int ret; @@ -168,9 +168,9 @@ void ecrt_soe_request_write(ec_soe_request_t *req) ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_WRITE, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to command an SDO write operation : %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ diff --git a/lib/voe_handler.c b/lib/voe_handler.c index f0ae45c9..bc17c370 100644 --- a/lib/voe_handler.c +++ b/lib/voe_handler.c @@ -46,7 +46,7 @@ void ec_voe_handler_clear(ec_voe_handler_t *voe) /****************************************************************************/ -void ecrt_voe_handler_send_header(ec_voe_handler_t *voe, uint32_t vendor_id, +int ecrt_voe_handler_send_header(ec_voe_handler_t *voe, uint32_t vendor_id, uint16_t vendor_type) { ec_ioctl_voe_t data; @@ -59,14 +59,14 @@ void ecrt_voe_handler_send_header(ec_voe_handler_t *voe, uint32_t vendor_id, ret = 
ioctl(voe->config->master->fd, EC_IOCTL_VOE_SEND_HEADER, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to set VoE send header: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_voe_handler_received_header(const ec_voe_handler_t *voe, +int ecrt_voe_handler_received_header(const ec_voe_handler_t *voe, uint32_t *vendor_id, uint16_t *vendor_type) { ec_ioctl_voe_t data; @@ -79,9 +79,9 @@ void ecrt_voe_handler_received_header(const ec_voe_handler_t *voe, ret = ioctl(voe->config->master->fd, EC_IOCTL_VOE_REC_HEADER, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to get received VoE header: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ @@ -100,7 +100,7 @@ size_t ecrt_voe_handler_data_size(const ec_voe_handler_t *voe) /****************************************************************************/ -void ecrt_voe_handler_read(ec_voe_handler_t *voe) +int ecrt_voe_handler_read(ec_voe_handler_t *voe) { ec_ioctl_voe_t data; int ret; @@ -110,14 +110,14 @@ void ecrt_voe_handler_read(ec_voe_handler_t *voe) ret = ioctl(voe->config->master->fd, EC_IOCTL_VOE_READ, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to initiate VoE reading: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_voe_handler_read_nosync(ec_voe_handler_t *voe) +int ecrt_voe_handler_read_nosync(ec_voe_handler_t *voe) { ec_ioctl_voe_t data; int ret; @@ -127,14 +127,14 @@ void ecrt_voe_handler_read_nosync(ec_voe_handler_t *voe) ret = ioctl(voe->config->master->fd, EC_IOCTL_VOE_READ_NOSYNC, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to initiate VoE reading: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ -void ecrt_voe_handler_write(ec_voe_handler_t *voe, size_t size) +int ecrt_voe_handler_write(ec_voe_handler_t *voe, size_t size) { ec_ioctl_voe_t data; int ret; @@ -146,9 +146,9 @@ void ecrt_voe_handler_write(ec_voe_handler_t *voe, size_t size) ret = ioctl(voe->config->master->fd, EC_IOCTL_VOE_WRITE, &data); if (EC_IOCTL_IS_ERROR(ret)) { - fprintf(stderr, "Failed to initiate VoE writing: %s\n", - strerror(EC_IOCTL_ERRNO(ret))); + return -EC_IOCTL_ERRNO(ret); } + return 0; } /****************************************************************************/ diff --git a/master/Kbuild.in b/master/Kbuild.in index 75eff6ff..642ea1ff 100644 --- a/master/Kbuild.in +++ b/master/Kbuild.in @@ -90,8 +90,8 @@ CFLAGS_rtdm-ioctl.o := @XENOMAI_RTDM_CFLAGS@ endif ifeq (@ENABLE_RTAI@, 1) -CFLAGS_rtdm.o := -I@RTAI_DIR@/include -CFLAGS_rtdm-ioctl.o := -I@RTAI_DIR@/include +CFLAGS_rtdm.o := @RTAI_KERNEL_CFLAGS@ +CFLAGS_rtdm-ioctl.o := @RTAI_KERNEL_CFLAGS@ endif ec_master-objs += rtdm-ioctl.o diff --git a/master/device.c b/master/device.c index 5f873daa..598d163a 100644 --- a/master/device.c +++ b/master/device.c @@ -564,7 +564,7 @@ int ecdev_open(ec_device_t *device /**< EtherCAT device */) ret = ec_device_open(device); if (ret) { - EC_MASTER_ERR(master, "Failed to open device!\n"); + EC_MASTER_ERR(master, "Failed to open device: error %d!\n", ret); return ret; } diff --git a/master/domain.c b/master/domain.c 
index c61a3c1c..34eecb4b 100644 --- a/master/domain.c +++ b/master/domain.c @@ -440,14 +440,14 @@ void ecrt_domain_external_memory(ec_domain_t *domain, uint8_t *mem) /****************************************************************************/ -uint8_t *ecrt_domain_data(ec_domain_t *domain) +uint8_t *ecrt_domain_data(const ec_domain_t *domain) { return domain->data; } /****************************************************************************/ -void ecrt_domain_process(ec_domain_t *domain) +int ecrt_domain_process(ec_domain_t *domain) { uint16_t wc_sum[EC_MAX_NUM_DEVICES] = {}, wc_total; ec_datagram_pair_t *pair; @@ -633,11 +633,12 @@ void ecrt_domain_process(ec_domain_t *domain) domain->working_counter_changes = 0; } #endif + return 0; } /****************************************************************************/ -void ecrt_domain_queue(ec_domain_t *domain) +int ecrt_domain_queue(ec_domain_t *domain) { ec_datagram_pair_t *datagram_pair; ec_device_index_t dev_idx; @@ -663,11 +664,12 @@ void ecrt_domain_queue(ec_domain_t *domain) &datagram_pair->datagrams[dev_idx]); } } + return 0; } /****************************************************************************/ -void ecrt_domain_state(const ec_domain_t *domain, ec_domain_state_t *state) +int ecrt_domain_state(const ec_domain_t *domain, ec_domain_state_t *state) { unsigned int dev_idx; uint16_t wc = 0; @@ -690,6 +692,7 @@ void ecrt_domain_state(const ec_domain_t *domain, ec_domain_state_t *state) } state->redundancy_active = domain->redundancy_active; + return 0; } /****************************************************************************/ diff --git a/master/ethernet.c b/master/ethernet.c index dc519733..76b298df 100644 --- a/master/ethernet.c +++ b/master/ethernet.c @@ -38,6 +38,16 @@ #include "mailbox.h" #include "ethernet.h" +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif /****************************************************************************/ /** Defines the debug level of EoE processing. @@ -156,7 +166,7 @@ int ec_eoe_init( // initialize net_device eoe->dev->netdev_ops = &ec_eoedev_ops; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) || (SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5) eth_hw_addr_set(eoe->dev, mac_addr); #else memcpy(eoe->dev->dev_addr, mac_addr, sizeof(mac_addr)); @@ -184,7 +194,7 @@ int ec_eoe_init( // make the last address octet unique mac_addr[ETH_ALEN - 1] = (uint8_t) eoe->dev->ifindex; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) || (SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 5) eth_hw_addr_set(eoe->dev, mac_addr); #else memcpy(eoe->dev->dev_addr, mac_addr, sizeof(mac_addr)); diff --git a/master/fsm_change.c b/master/fsm_change.c index 9a09fa53..0dc10d6b 100644 --- a/master/fsm_change.c +++ b/master/fsm_change.c @@ -33,8 +33,11 @@ /****************************************************************************/ /** Timeout while waiting for AL state change [s]. + * + * ETG2000_S_R_V1i0i15 section 5.3.7.2 mentions 10 s as maximum AL state + * change timeout. 
*/ -#define EC_AL_STATE_CHANGE_TIMEOUT 5 +#define EC_AL_STATE_CHANGE_TIMEOUT 10 /****************************************************************************/ diff --git a/master/fsm_coe.c b/master/fsm_coe.c index f12bbcd8..9b015de1 100644 --- a/master/fsm_coe.c +++ b/master/fsm_coe.c @@ -1309,7 +1309,7 @@ void ec_fsm_coe_down_start( if (slave->configured_rx_mailbox_size < EC_MBOX_HEADER_SIZE + EC_COE_DOWN_REQ_HEADER_SIZE) { EC_SLAVE_ERR(slave, "Mailbox too small!\n"); - request->errno = EOVERFLOW; + request->errno = ENOBUFS; fsm->state = ec_fsm_coe_error; return; } @@ -2463,7 +2463,7 @@ void ec_fsm_coe_up_seg_response( EC_SLAVE_ERR(slave, "SDO upload 0x%04X:%02X failed: Fragment" " exceeding complete size!\n", request->index, request->subindex); - request->errno = EOVERFLOW; + request->errno = ENOBUFS; fsm->state = ec_fsm_coe_error; return; } diff --git a/master/ioctl.c b/master/ioctl.c index f84944fb..3a5a44c7 100644 --- a/master/ioctl.c +++ b/master/ioctl.c @@ -1,6 +1,6 @@ /***************************************************************************** * - * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2024 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -1069,7 +1069,10 @@ static ATTRIBUTES int ec_ioctl_slave_reg_read( return ret; } - ecrt_reg_request_read(&request, io.address, io.size); + ret = ecrt_reg_request_read(&request, io.address, io.size); + if (ret) { + return ret; + } if (down_interruptible(&master->master_sem)) { ec_reg_request_clear(&request); @@ -1153,7 +1156,10 @@ static ATTRIBUTES int ec_ioctl_slave_reg_write( return -EFAULT; } - ecrt_reg_request_write(&request, io.address, io.size); + ret = ecrt_reg_request_write(&request, io.address, io.size); + if (ret) { + return ret; + } if (down_interruptible(&master->master_sem)) { ec_reg_request_clear(&request); @@ -2016,8 +2022,7 @@ static ATTRIBUTES int ec_ioctl_deactivate( if (unlikely(!ctx->requested)) return -EPERM; - ecrt_master_deactivate(master); - return 0; + return ecrt_master_deactivate(master); } /****************************************************************************/ @@ -2064,15 +2069,18 @@ static ATTRIBUTES int ec_ioctl_send( ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ ) { + int ret; + if (unlikely(!ctx->requested)) { return -EPERM; } if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; - ecrt_master_send(master); + + ret = ecrt_master_send(master); ec_ioctl_unlock(&master->io_mutex); - return 0; + return ret; } /****************************************************************************/ @@ -2087,15 +2095,18 @@ static ATTRIBUTES int ec_ioctl_receive( ec_ioctl_context_t *ctx /**< Private data structure of file handle. 
*/ ) { + int ret; + if (unlikely(!ctx->requested)) { return -EPERM; } if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; - ecrt_master_receive(master); + + ret = ecrt_master_receive(master); ec_ioctl_unlock(&master->io_mutex); - return 0; + return ret; } /****************************************************************************/ @@ -2111,8 +2122,11 @@ static ATTRIBUTES int ec_ioctl_master_state( ) { ec_master_state_t data; + int ret; - ecrt_master_state(master, &data); + ret = ecrt_master_state(master, &data); + if (ret) + return ret; if (ec_copy_to_user((void __user *) arg, &data, sizeof(data), ctx)) return -EFAULT; @@ -2174,8 +2188,7 @@ static ATTRIBUTES int ec_ioctl_app_time( return -EFAULT; } - ecrt_master_application_time(master, time); - return 0; + return ecrt_master_application_time(master, time); } /****************************************************************************/ @@ -2190,15 +2203,18 @@ static ATTRIBUTES int ec_ioctl_sync_ref( ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ ) { + int ret; + if (unlikely(!ctx->requested)) { return -EPERM; } if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; - ecrt_master_sync_reference_clock(master); + + ret = ecrt_master_sync_reference_clock(master); ec_ioctl_unlock(&master->io_mutex); - return 0; + return ret; } /****************************************************************************/ @@ -2213,6 +2229,7 @@ static ATTRIBUTES int ec_ioctl_sync_ref_to( ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ ) { + int ret; uint64_t time; if (unlikely(!ctx->requested)) @@ -2224,9 +2241,10 @@ static ATTRIBUTES int ec_ioctl_sync_ref_to( if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; - ecrt_master_sync_reference_clock_to(master, time); + + ret = ecrt_master_sync_reference_clock_to(master, time); ec_ioctl_unlock(&master->io_mutex); - return 0; + return ret; } /****************************************************************************/ @@ -2241,15 +2259,18 @@ static ATTRIBUTES int ec_ioctl_sync_slaves( ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ ) { + int ret; + if (unlikely(!ctx->requested)) { return -EPERM; } if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; - ecrt_master_sync_slave_clocks(master); + + ret = ecrt_master_sync_slave_clocks(master); ec_ioctl_unlock(&master->io_mutex); - return 0; + return ret; } /****************************************************************************/ @@ -2295,15 +2316,18 @@ static ATTRIBUTES int ec_ioctl_sync_mon_queue( ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ ) { + int ret; + if (unlikely(!ctx->requested)) { return -EPERM; } if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; - ecrt_master_sync_monitor_queue(master); + + ret = ecrt_master_sync_monitor_queue(master); ec_ioctl_unlock(&master->io_mutex); - return 0; + return ret; } /****************************************************************************/ @@ -2344,9 +2368,12 @@ static ATTRIBUTES int ec_ioctl_reset( ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ ) { - down(&master->master_sem); - ecrt_master_reset(master); - up(&master->master_sem); +#ifdef EC_IOCTL_RTDM + /* Xenomai/LXRT is like NMI context, so we do a two-stage schedule. 
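     * Stage one queues an irq_work item, whose callback runs in a context
     * where schedule_work() is allowed; stage two then performs the actual
     * reset from the sc_reset_work workqueue item in process context.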
*/ + irq_work_queue(&master->sc_reset_work_kicker); +#else + schedule_work(&master->sc_reset_work); +#endif return 0; } @@ -2439,7 +2466,7 @@ static ATTRIBUTES int ec_ioctl_sc_watchdog( goto out_up; } - ecrt_slave_config_watchdog(sc, + ret = ecrt_slave_config_watchdog(sc, data.watchdog_divider, data.watchdog_intervals); out_up: @@ -2513,8 +2540,7 @@ static ATTRIBUTES int ec_ioctl_sc_clear_pdos( up(&master->master_sem); /** \todo sc could be invalidated */ - ecrt_slave_config_pdo_assign_clear(sc, data.sync_index); - return 0; + return ecrt_slave_config_pdo_assign_clear(sc, data.sync_index); } /****************************************************************************/ @@ -2583,8 +2609,7 @@ static ATTRIBUTES int ec_ioctl_sc_clear_entries( up(&master->master_sem); /** \todo sc could be invalidated */ - ecrt_slave_config_pdo_mapping_clear(sc, data.index); - return 0; + return ecrt_slave_config_pdo_mapping_clear(sc, data.index); } /****************************************************************************/ @@ -2698,6 +2723,7 @@ static ATTRIBUTES int ec_ioctl_sc_dc( { ec_ioctl_config_t data; ec_slave_config_t *sc; + int ret; if (unlikely(!ctx->requested)) return -EPERM; @@ -2713,7 +2739,7 @@ static ATTRIBUTES int ec_ioctl_sc_dc( return -ENOENT; } - ecrt_slave_config_dc(sc, data.dc_assign_activate, + ret = ecrt_slave_config_dc(sc, data.dc_assign_activate, data.dc_sync[0].cycle_time, data.dc_sync[0].shift_time, data.dc_sync[1].cycle_time, @@ -2721,7 +2747,7 @@ static ATTRIBUTES int ec_ioctl_sc_dc( up(&master->master_sem); - return 0; + return ret; } /****************************************************************************/ @@ -3166,6 +3192,7 @@ static ATTRIBUTES int ec_ioctl_sc_state( ec_ioctl_sc_state_t data; const ec_slave_config_t *sc; ec_slave_config_state_t state; + int ret; if (unlikely(!ctx->requested)) return -EPERM; @@ -3181,7 +3208,9 @@ static ATTRIBUTES int ec_ioctl_sc_state( return -ENOENT; } - ecrt_slave_config_state(sc, &state); + ret = ecrt_slave_config_state(sc, &state); + if (ret) + return ret; if (ec_copy_to_user((void __user *) data.state, &state, sizeof(state), ctx)) @@ -3458,8 +3487,7 @@ static ATTRIBUTES int ec_ioctl_domain_process( return -ENOENT; } - ecrt_domain_process(domain); - return 0; + return ecrt_domain_process(domain); } /****************************************************************************/ @@ -3475,6 +3503,7 @@ static ATTRIBUTES int ec_ioctl_domain_queue( ) { ec_domain_t *domain; + int ret; if (unlikely(!ctx->requested)) return -EPERM; @@ -3488,9 +3517,10 @@ static ATTRIBUTES int ec_ioctl_domain_queue( if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; - ecrt_domain_queue(domain); + + ret = ecrt_domain_queue(domain); ec_ioctl_unlock(&master->io_mutex); - return 0; + return ret; } /****************************************************************************/ @@ -3508,6 +3538,7 @@ static ATTRIBUTES int ec_ioctl_domain_state( ec_ioctl_domain_state_t data; const ec_domain_t *domain; ec_domain_state_t state; + int ret; if (unlikely(!ctx->requested)) return -EPERM; @@ -3523,9 +3554,12 @@ static ATTRIBUTES int ec_ioctl_domain_state( return -ENOENT; } - ecrt_domain_state(domain, &state); + ret = ecrt_domain_state(domain, &state); + if (ret) + return ret; - if (ec_copy_to_user((void __user *) data.state, &state, sizeof(state), ctx)) + if (ec_copy_to_user((void __user *) data.state, &state, sizeof(state), + ctx)) return -EFAULT; return 0; @@ -3564,8 +3598,7 @@ static ATTRIBUTES int ec_ioctl_sdo_request_index( return -ENOENT; } - 
ecrt_sdo_request_index(req, data.sdo_index, data.sdo_subindex); - return 0; + return ecrt_sdo_request_index(req, data.sdo_index, data.sdo_subindex); } /****************************************************************************/ @@ -3601,13 +3634,14 @@ static ATTRIBUTES int ec_ioctl_sdo_request_timeout( return -ENOENT; } - ecrt_sdo_request_timeout(req, data.timeout); - return 0; + return ecrt_sdo_request_timeout(req, data.timeout); } /****************************************************************************/ /** Gets an SDO request's state. + * + * Also pre-fetches the size of incoming data. * * \return Zero on success, otherwise a negative error code. */ @@ -3683,8 +3717,7 @@ static ATTRIBUTES int ec_ioctl_sdo_request_read( return -ENOENT; } - ecrt_sdo_request_read(req); - return 0; + return ecrt_sdo_request_read(req); } /****************************************************************************/ @@ -3702,7 +3735,6 @@ static ATTRIBUTES int ec_ioctl_sdo_request_write( ec_ioctl_sdo_request_t data; ec_slave_config_t *sc; ec_sdo_request_t *req; - int ret; if (unlikely(!ctx->requested)) return -EPERM; @@ -3726,17 +3758,15 @@ static ATTRIBUTES int ec_ioctl_sdo_request_write( return -ENOENT; } - ret = ec_sdo_request_alloc(req, data.size); - if (ret) - return ret; + if (data.size > req->mem_size) + return -ENOBUFS; if (ec_copy_from_user(req->data, (void __user *) data.data, data.size, ctx)) return -EFAULT; req->data_size = data.size; - ecrt_sdo_request_write(req); - return 0; + return ecrt_sdo_request_write(req); } /****************************************************************************/ @@ -3812,8 +3842,7 @@ static ATTRIBUTES int ec_ioctl_soe_request_index( return -ENOENT; } - ecrt_soe_request_idn(req, data.drive_no, data.idn); - return 0; + return ecrt_soe_request_idn(req, data.drive_no, data.idn); } /****************************************************************************/ @@ -3849,8 +3878,7 @@ static ATTRIBUTES int ec_ioctl_soe_request_timeout( return -ENOENT; } - ecrt_soe_request_timeout(req, data.timeout); - return 0; + return ecrt_soe_request_timeout(req, data.timeout); } /****************************************************************************/ @@ -3933,8 +3961,7 @@ static ATTRIBUTES int ec_ioctl_soe_request_read( return -ENOENT; } - ecrt_soe_request_read(req); - return 0; + return ecrt_soe_request_read(req); } /****************************************************************************/ @@ -3952,7 +3979,6 @@ static ATTRIBUTES int ec_ioctl_soe_request_write( ec_ioctl_soe_request_t data; ec_slave_config_t *sc; ec_soe_request_t *req; - int ret; if (unlikely(!ctx->requested)) return -EPERM; @@ -3976,17 +4002,15 @@ static ATTRIBUTES int ec_ioctl_soe_request_write( return -ENOENT; } - ret = ec_soe_request_alloc(req, data.size); - if (ret) - return ret; + if (data.size > req->mem_size) + return -ENOBUFS; if (ec_copy_from_user(req->data, (void __user *) data.data, data.size, ctx)) return -EFAULT; req->data_size = data.size; - ecrt_soe_request_write(req); - return 0; + return ecrt_soe_request_write(req); } /****************************************************************************/ @@ -4157,7 +4181,7 @@ static ATTRIBUTES int ec_ioctl_reg_request_write( } if (io.transfer_size > reg->mem_size) { - return -EOVERFLOW; + return -ENOBUFS; } if (ec_copy_from_user(reg->data, (void __user *) io.data, @@ -4165,8 +4189,7 @@ static ATTRIBUTES int ec_ioctl_reg_request_write( return -EFAULT; } - ecrt_reg_request_write(reg, io.address, io.transfer_size); - return 0; + return 
ecrt_reg_request_write(reg, io.address, io.transfer_size); } /****************************************************************************/ @@ -4205,11 +4228,10 @@ static ATTRIBUTES int ec_ioctl_reg_request_read( } if (io.transfer_size > reg->mem_size) { - return -EOVERFLOW; + return -ENOBUFS; } - ecrt_reg_request_read(reg, io.address, io.transfer_size); - return 0; + return ecrt_reg_request_read(reg, io.address, io.transfer_size); } /****************************************************************************/ @@ -4254,8 +4276,7 @@ static ATTRIBUTES int ec_ioctl_voe_send_header( return -ENOENT; } - ecrt_voe_handler_send_header(voe, vendor_id, vendor_type); - return 0; + return ecrt_voe_handler_send_header(voe, vendor_id, vendor_type); } /****************************************************************************/ @@ -4275,6 +4296,7 @@ static ATTRIBUTES int ec_ioctl_voe_rec_header( ec_voe_handler_t *voe; uint32_t vendor_id; uint16_t vendor_type; + int ret; if (unlikely(!ctx->requested)) return -EPERM; @@ -4293,7 +4315,9 @@ static ATTRIBUTES int ec_ioctl_voe_rec_header( return -ENOENT; } - ecrt_voe_handler_received_header(voe, &vendor_id, &vendor_type); + ret = ecrt_voe_handler_received_header(voe, &vendor_id, &vendor_type); + if (ret) + return ret; if (likely(data.vendor_id)) if (ec_copy_to_user(data.vendor_id, &vendor_id, @@ -4341,8 +4365,7 @@ static ATTRIBUTES int ec_ioctl_voe_read( return -ENOENT; } - ecrt_voe_handler_read(voe); - return 0; + return ecrt_voe_handler_read(voe); } /****************************************************************************/ @@ -4378,8 +4401,7 @@ static ATTRIBUTES int ec_ioctl_voe_read_nosync( return -ENOENT; } - ecrt_voe_handler_read_nosync(voe); - return 0; + return ecrt_voe_handler_read_nosync(voe); } /****************************************************************************/ @@ -4417,15 +4439,14 @@ static ATTRIBUTES int ec_ioctl_voe_write( if (data.size) { if (data.size > ec_voe_handler_mem_size(voe)) - return -EOVERFLOW; + return -ENOBUFS; if (ec_copy_from_user(ecrt_voe_handler_data(voe), (void __user *) data.data, data.size, ctx)) return -EFAULT; } - ecrt_voe_handler_write(voe, data.size); - return 0; + return ecrt_voe_handler_write(voe, data.size); } /****************************************************************************/ @@ -4463,6 +4484,7 @@ static ATTRIBUTES int ec_ioctl_voe_exec( if (ec_ioctl_lock_interruptible(&master->io_mutex)) return -EINTR; + data.state = ecrt_voe_handler_execute(voe); ec_ioctl_unlock(&master->io_mutex); if (data.state == EC_REQUEST_SUCCESS && voe->dir == EC_DIR_INPUT) @@ -4593,7 +4615,7 @@ static ATTRIBUTES int ec_ioctl_slave_foe_read( if (request.data_size > io.buffer_size) { EC_SLAVE_ERR(slave, "%s(): Buffer too small.\n", __func__); ec_foe_request_clear(&request); - return -EOVERFLOW; + return -ENOBUFS; } io.data_size = request.data_size; if (copy_to_user((void __user *) io.buffer, @@ -4795,7 +4817,9 @@ static ATTRIBUTES int ec_ioctl_slave_soe_write( return retval; } -/****************************************************************************/ +/***************************************************************************** + * ioctl() file operation functions + ****************************************************************************/ /** ioctl() function to use. * @@ -4810,6 +4834,8 @@ static long ec_ioctl_nrt( void *arg /**< ioctl() argument. */); #endif +/****************************************************************************/ + /** Called when an ioctl() command is issued. 
* Both RT and nRT context. * @@ -4828,13 +4854,6 @@ static long ec_ioctl_both( case EC_IOCTL_MODULE: ret = ec_ioctl_module(arg, ctx); break; - case EC_IOCTL_MASTER_DEBUG: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_master_debug(master, arg); - break; case EC_IOCTL_MASTER_RESCAN: if (!ctx->writable) { ret = -EPERM; @@ -4848,50 +4867,6 @@ static long ec_ioctl_both( case EC_IOCTL_MASTER_LINK_STATE: ret = ec_ioctl_master_link_state(master, arg, ctx); break; - case EC_IOCTL_APP_TIME: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_app_time(master, arg, ctx); - break; - case EC_IOCTL_REF_CLOCK_TIME: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_ref_clock_time(master, arg, ctx); - break; - case EC_IOCTL_SC_EMERG_POP: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_sc_emerg_pop(master, arg, ctx); - break; - case EC_IOCTL_SC_EMERG_CLEAR: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_sc_emerg_clear(master, arg, ctx); - break; - case EC_IOCTL_SC_EMERG_OVERRUNS: - ret = ec_ioctl_sc_emerg_overruns(master, arg, ctx); - break; - case EC_IOCTL_SC_STATE: - ret = ec_ioctl_sc_state(master, arg, ctx); - break; - case EC_IOCTL_DOMAIN_STATE: - ret = ec_ioctl_domain_state(master, arg, ctx); - break; - case EC_IOCTL_SDO_REQUEST_INDEX: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_sdo_request_index(master, arg, ctx); - break; case EC_IOCTL_SDO_REQUEST_TIMEOUT: if (!ctx->writable) { ret = -EPERM; @@ -4899,33 +4874,9 @@ static long ec_ioctl_both( } ret = ec_ioctl_sdo_request_timeout(master, arg, ctx); break; - case EC_IOCTL_SDO_REQUEST_STATE: - ret = ec_ioctl_sdo_request_state(master, arg, ctx); - break; - case EC_IOCTL_SDO_REQUEST_READ: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_sdo_request_read(master, arg, ctx); - break; - case EC_IOCTL_SDO_REQUEST_WRITE: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_sdo_request_write(master, arg, ctx); - break; case EC_IOCTL_SDO_REQUEST_DATA: ret = ec_ioctl_sdo_request_data(master, arg, ctx); break; - case EC_IOCTL_SOE_REQUEST_IDN: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_soe_request_index(master, arg, ctx); - break; case EC_IOCTL_SOE_REQUEST_TIMEOUT: if (!ctx->writable) { ret = -EPERM; @@ -4933,46 +4884,12 @@ static long ec_ioctl_both( } ret = ec_ioctl_soe_request_timeout(master, arg, ctx); break; - case EC_IOCTL_SOE_REQUEST_STATE: - ret = ec_ioctl_soe_request_state(master, arg, ctx); - break; - case EC_IOCTL_SOE_REQUEST_READ: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_soe_request_read(master, arg, ctx); - break; - case EC_IOCTL_SOE_REQUEST_WRITE: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_soe_request_write(master, arg, ctx); - break; case EC_IOCTL_SOE_REQUEST_DATA: ret = ec_ioctl_soe_request_data(master, arg, ctx); break; case EC_IOCTL_REG_REQUEST_DATA: ret = ec_ioctl_reg_request_data(master, arg, ctx); break; - case EC_IOCTL_REG_REQUEST_STATE: - ret = ec_ioctl_reg_request_state(master, arg, ctx); - break; - case EC_IOCTL_REG_REQUEST_WRITE: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_reg_request_write(master, arg, ctx); - break; - case EC_IOCTL_REG_REQUEST_READ: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_reg_request_read(master, arg, ctx); - break; case EC_IOCTL_VOE_SEND_HEADER: if (!ctx->writable) { ret = -EPERM; @@ -4980,30 +4897,6 @@ static long 
ec_ioctl_both( } ret = ec_ioctl_voe_send_header(master, arg, ctx); break; - case EC_IOCTL_VOE_REC_HEADER: - ret = ec_ioctl_voe_rec_header(master, arg, ctx); - break; - case EC_IOCTL_VOE_READ: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_voe_read(master, arg, ctx); - break; - case EC_IOCTL_VOE_READ_NOSYNC: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_voe_read_nosync(master, arg, ctx); - break; - case EC_IOCTL_VOE_WRITE: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_voe_write(master, arg, ctx); - break; case EC_IOCTL_VOE_DATA: ret = ec_ioctl_voe_data(master, arg, ctx); break; @@ -5020,6 +4913,8 @@ static long ec_ioctl_both( return ret; } +/****************************************************************************/ + /** Called when an ioctl() command is issued. * RTDM: RT only. * @@ -5058,6 +4953,13 @@ long ec_ioctl } ret = ec_ioctl_receive(master, arg, ctx); break; + case EC_IOCTL_APP_TIME: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_app_time(master, arg, ctx); + break; case EC_IOCTL_SYNC_REF: if (!ctx->writable) { ret = -EPERM; @@ -5079,6 +4981,13 @@ long ec_ioctl } ret = ec_ioctl_sync_slaves(master, arg, ctx); break; + case EC_IOCTL_REF_CLOCK_TIME: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_ref_clock_time(master, arg, ctx); + break; case EC_IOCTL_SYNC_MON_QUEUE: if (!ctx->writable) { ret = -EPERM; @@ -5093,6 +5002,33 @@ long ec_ioctl } ret = ec_ioctl_sync_mon_process(master, arg, ctx); break; + case EC_IOCTL_RESET: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_reset(master, arg, ctx); + break; + case EC_IOCTL_SC_EMERG_POP: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_sc_emerg_pop(master, arg, ctx); + break; + case EC_IOCTL_SC_EMERG_CLEAR: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_sc_emerg_clear(master, arg, ctx); + break; + case EC_IOCTL_SC_EMERG_OVERRUNS: + ret = ec_ioctl_sc_emerg_overruns(master, arg, ctx); + break; + case EC_IOCTL_SC_STATE: + ret = ec_ioctl_sc_state(master, arg, ctx); + break; case EC_IOCTL_DOMAIN_PROCESS: if (!ctx->writable) { ret = -EPERM; @@ -5107,6 +5043,98 @@ long ec_ioctl } ret = ec_ioctl_domain_queue(master, arg, ctx); break; + case EC_IOCTL_DOMAIN_STATE: + ret = ec_ioctl_domain_state(master, arg, ctx); + break; + case EC_IOCTL_SDO_REQUEST_INDEX: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_sdo_request_index(master, arg, ctx); + break; + case EC_IOCTL_SDO_REQUEST_STATE: + ret = ec_ioctl_sdo_request_state(master, arg, ctx); + break; + case EC_IOCTL_SDO_REQUEST_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_sdo_request_read(master, arg, ctx); + break; + case EC_IOCTL_SDO_REQUEST_WRITE: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_sdo_request_write(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_IDN: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_soe_request_index(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_STATE: + ret = ec_ioctl_soe_request_state(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_soe_request_read(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_WRITE: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_soe_request_write(master, arg, ctx); + break; + case EC_IOCTL_REG_REQUEST_STATE: + ret = 
ec_ioctl_reg_request_state(master, arg, ctx); + break; + case EC_IOCTL_REG_REQUEST_WRITE: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_reg_request_write(master, arg, ctx); + break; + case EC_IOCTL_REG_REQUEST_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_reg_request_read(master, arg, ctx); + break; + case EC_IOCTL_VOE_REC_HEADER: + ret = ec_ioctl_voe_rec_header(master, arg, ctx); + break; + case EC_IOCTL_VOE_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_voe_read(master, arg, ctx); + break; + case EC_IOCTL_VOE_READ_NOSYNC: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_voe_read_nosync(master, arg, ctx); + break; + case EC_IOCTL_VOE_WRITE: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_voe_write(master, arg, ctx); + break; case EC_IOCTL_VOE_EXEC: if (!ctx->writable) { ret = -EPERM; @@ -5131,6 +5159,8 @@ long ec_ioctl return ret; } +/****************************************************************************/ + /** Called when an ioctl() command is issued. * nRT context only. * @@ -5179,6 +5209,13 @@ static long ec_ioctl_nrt case EC_IOCTL_DOMAIN_DATA: ret = ec_ioctl_domain_data(master, arg); break; + case EC_IOCTL_MASTER_DEBUG: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_master_debug(master, arg); + break; case EC_IOCTL_SLAVE_STATE: if (!ctx->writable) { ret = -EPERM; @@ -5193,6 +5230,10 @@ static long ec_ioctl_nrt ret = ec_ioctl_slave_sdo_entry(master, arg); break; case EC_IOCTL_SLAVE_SDO_UPLOAD: + if (!ctx->writable) { + ret = -EPERM; + break; + } ret = ec_ioctl_slave_sdo_upload(master, arg); break; case EC_IOCTL_SLAVE_SDO_DOWNLOAD: @@ -5213,6 +5254,10 @@ static long ec_ioctl_nrt ret = ec_ioctl_slave_sii_write(master, arg); break; case EC_IOCTL_SLAVE_REG_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } ret = ec_ioctl_slave_reg_read(master, arg); break; case EC_IOCTL_SLAVE_REG_WRITE: @@ -5223,6 +5268,10 @@ static long ec_ioctl_nrt ret = ec_ioctl_slave_reg_write(master, arg); break; case EC_IOCTL_SLAVE_FOE_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } ret = ec_ioctl_slave_foe_read(master, arg); break; case EC_IOCTL_SLAVE_FOE_WRITE: @@ -5233,8 +5282,19 @@ static long ec_ioctl_nrt ret = ec_ioctl_slave_foe_write(master, arg); break; case EC_IOCTL_SLAVE_SOE_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } ret = ec_ioctl_slave_soe_read(master, arg); break; + case EC_IOCTL_SLAVE_SOE_WRITE: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_slave_soe_write(master, arg); + break; #ifdef EC_EOE case EC_IOCTL_SLAVE_EOE_IP_PARAM: if (!ctx->writable) { @@ -5244,13 +5304,6 @@ static long ec_ioctl_nrt ret = ec_ioctl_slave_eoe_ip_param(master, arg); break; #endif - case EC_IOCTL_SLAVE_SOE_WRITE: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_slave_soe_write(master, arg); - break; case EC_IOCTL_CONFIG: ret = ec_ioctl_config(master, arg); break; @@ -5277,6 +5330,9 @@ static long ec_ioctl_nrt ret = ec_ioctl_eoe_handler(master, arg); break; #endif + + /* Application interface */ + case EC_IOCTL_REQUEST: if (!ctx->writable) { ret = -EPERM; @@ -5319,13 +5375,6 @@ static long ec_ioctl_nrt } ret = ec_ioctl_deactivate(master, arg, ctx); break; - case EC_IOCTL_RESET: - if (!ctx->writable) { - ret = -EPERM; - break; - } - ret = ec_ioctl_reset(master, arg, ctx); - break; case EC_IOCTL_SC_SYNC: if (!ctx->writable) { ret = -EPERM; diff --git a/master/mailbox.c b/master/mailbox.c index 
dacf3392..05a10fd1 100644 --- a/master/mailbox.c +++ b/master/mailbox.c @@ -60,7 +60,7 @@ uint8_t *ec_slave_mbox_prepare_send(const ec_slave_t *slave, /**< slave */ if (unlikely(total_size > slave->configured_rx_mailbox_size)) { EC_SLAVE_ERR(slave, "Data size (%zu) does not fit in mailbox (%u)!\n", total_size, slave->configured_rx_mailbox_size); - return ERR_PTR(-EOVERFLOW); + return ERR_PTR(-ENOBUFS); } ret = ec_datagram_fpwr(datagram, slave->station_address, diff --git a/master/master.c b/master/master.c index 8a8c7744..d327337a 100644 --- a/master/master.c +++ b/master/master.c @@ -107,6 +107,8 @@ static int ec_master_eoe_thread(void *); void ec_master_find_dc_ref_clock(ec_master_t *); void ec_master_clear_device_stats(ec_master_t *); void ec_master_update_device_stats(ec_master_t *); +static void sc_reset_task_kicker(struct irq_work *work); +static void sc_reset_task(struct work_struct *work); /****************************************************************************/ @@ -316,6 +318,9 @@ int ec_master_init(ec_master_t *master, /**< EtherCAT master */ master->dc_ref_config = NULL; master->dc_ref_clock = NULL; + INIT_WORK(&master->sc_reset_work, sc_reset_task); + init_irq_work(&master->sc_reset_work_kicker, sc_reset_task_kicker); + // init character device ret = ec_cdev_init(&master->cdev, master, device_number); if (ret) @@ -383,6 +388,9 @@ void ec_master_clear( ec_cdev_clear(&master->cdev); + irq_work_sync(&master->sc_reset_work_kicker); + cancel_work_sync(&master->sc_reset_work); + #ifdef EC_EOE ec_master_clear_eoe_handlers(master); #endif @@ -2342,7 +2350,7 @@ int ecrt_master_activate(ec_master_t *master) /****************************************************************************/ -void ecrt_master_deactivate(ec_master_t *master) +int ecrt_master_deactivate(ec_master_t *master) { ec_slave_t *slave; #ifdef EC_EOE @@ -2354,7 +2362,7 @@ void ecrt_master_deactivate(ec_master_t *master) if (!master->active) { EC_MASTER_WARN(master, "%s: Master not active.\n", __func__); - return; + return -EINVAL; } ec_master_thread_stop(master); @@ -2408,11 +2416,12 @@ void ecrt_master_deactivate(ec_master_t *master) master->allow_scan = 0; master->active = 0; + return 0; } /****************************************************************************/ -void ecrt_master_send(ec_master_t *master) +int ecrt_master_send(ec_master_t *master) { ec_datagram_t *datagram, *n; ec_device_index_t dev_idx; @@ -2452,11 +2461,12 @@ void ecrt_master_send(ec_master_t *master) // send frames ec_master_send_datagrams(master, dev_idx); } + return 0; } /****************************************************************************/ -void ecrt_master_receive(ec_master_t *master) +int ecrt_master_receive(ec_master_t *master) { unsigned int dev_idx; ec_datagram_t *datagram, *next; @@ -2504,11 +2514,12 @@ void ecrt_master_receive(ec_master_t *master) #endif /* RT_SYSLOG */ } } + return 0; } /****************************************************************************/ -void ecrt_master_send_ext(ec_master_t *master) +int ecrt_master_send_ext(ec_master_t *master) { ec_datagram_t *datagram, *next; @@ -2520,7 +2531,7 @@ void ecrt_master_send_ext(ec_master_t *master) } up(&master->ext_queue_sem); - ecrt_master_send(master); + return ecrt_master_send(master); } /****************************************************************************/ @@ -2719,7 +2730,7 @@ void ecrt_master_callbacks(ec_master_t *master, /****************************************************************************/ -void ecrt_master_state(const ec_master_t 
*master, ec_master_state_t *state) +int ecrt_master_state(const ec_master_t *master, ec_master_state_t *state) { ec_device_index_t dev_idx; @@ -2738,6 +2749,7 @@ void ecrt_master_state(const ec_master_t *master, ec_master_state_t *state) /* Signal link up if at least one device has link. */ state->link_up |= master->devices[dev_idx].link_state; } + return 0; } /****************************************************************************/ @@ -2758,13 +2770,14 @@ int ecrt_master_link_state(const ec_master_t *master, unsigned int dev_idx, /****************************************************************************/ -void ecrt_master_application_time(ec_master_t *master, uint64_t app_time) +int ecrt_master_application_time(ec_master_t *master, uint64_t app_time) { master->app_time = app_time; if (unlikely(!master->dc_ref_time)) { master->dc_ref_time = app_time; } + return 0; } /****************************************************************************/ @@ -2789,17 +2802,20 @@ int ecrt_master_reference_clock_time(const ec_master_t *master, /****************************************************************************/ -void ecrt_master_sync_reference_clock(ec_master_t *master) +int ecrt_master_sync_reference_clock(ec_master_t *master) { if (master->dc_ref_clock) { EC_WRITE_U32(master->ref_sync_datagram.data, master->app_time); ec_master_queue_datagram(master, &master->ref_sync_datagram); + } else { + return -ENXIO; } + return 0; } /****************************************************************************/ -void ecrt_master_sync_reference_clock_to( +int ecrt_master_sync_reference_clock_to( ec_master_t *master, uint64_t sync_time ) @@ -2807,25 +2823,32 @@ void ecrt_master_sync_reference_clock_to( if (master->dc_ref_clock) { EC_WRITE_U32(master->ref_sync_datagram.data, sync_time); ec_master_queue_datagram(master, &master->ref_sync_datagram); + } else { + return -ENXIO; } + return 0; } /****************************************************************************/ -void ecrt_master_sync_slave_clocks(ec_master_t *master) +int ecrt_master_sync_slave_clocks(ec_master_t *master) { if (master->dc_ref_clock) { ec_datagram_zero(&master->sync_datagram); ec_master_queue_datagram(master, &master->sync_datagram); + } else { + return -ENXIO; } + return 0; } /****************************************************************************/ -void ecrt_master_sync_monitor_queue(ec_master_t *master) +int ecrt_master_sync_monitor_queue(ec_master_t *master) { ec_datagram_zero(&master->sync_mon_datagram); ec_master_queue_datagram(master, &master->sync_mon_datagram); + return 0; } /****************************************************************************/ @@ -3069,7 +3092,7 @@ int ecrt_master_sdo_upload(ec_master_t *master, uint16_t slave_position, } else { if (request.data_size > target_size) { EC_SLAVE_ERR(slave, "%s(): Buffer too small.\n", __func__); - ret = -EOVERFLOW; + ret = -ENOBUFS; } else { memcpy(target, request.data, request.data_size); @@ -3227,7 +3250,7 @@ int ecrt_master_read_idn(ec_master_t *master, uint16_t slave_position, } else { // success if (request.data_size > target_size) { EC_SLAVE_ERR(slave, "%s(): Buffer too small.\n", __func__); - ret = -EOVERFLOW; + ret = -ENOBUFS; } else { // data fits in buffer if (result_size) { @@ -3244,7 +3267,7 @@ int ecrt_master_read_idn(ec_master_t *master, uint16_t slave_position, /****************************************************************************/ -void ecrt_master_reset(ec_master_t *master) +int ecrt_master_reset(ec_master_t *master) { 
     ec_slave_config_t *sc;
@@ -3253,6 +3276,28 @@ void ecrt_master_reset(ec_master_t *master)
             ec_slave_request_state(sc->slave, EC_SLAVE_STATE_OP);
         }
     }
+    return 0;
+}
+
+/****************************************************************************/
+
+static void sc_reset_task_kicker(struct irq_work *work)
+{
+    struct ec_master *master =
+        container_of(work, struct ec_master, sc_reset_work_kicker);
+    schedule_work(&master->sc_reset_work);
+}
+
+/****************************************************************************/
+
+static void sc_reset_task(struct work_struct *work)
+{
+    struct ec_master *master =
+        container_of(work, struct ec_master, sc_reset_work);
+
+    down(&master->master_sem);
+    ecrt_master_reset(master);
+    up(&master->master_sem);
 }
 
 /****************************************************************************/
diff --git a/master/master.h b/master/master.h
index 995fc0ca..77bd97be 100644
--- a/master/master.h
+++ b/master/master.h
@@ -30,11 +30,13 @@
 #define __EC_MASTER_H__
 
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 
 #include "device.h"
 #include "domain.h"
@@ -294,6 +296,9 @@ struct ec_master {
 
     wait_queue_head_t request_queue; /**< Wait queue for external
                                        requests from user space. */
+    struct work_struct sc_reset_work; /**< Task to reset slave configuration. */
+    struct irq_work sc_reset_work_kicker; /**< NMI-Safe kicker to trigger
+                                            reset task above. */
 };
 
 /****************************************************************************/
diff --git a/master/reg_request.c b/master/reg_request.c
index 29f7d4e2..eab70b1d 100644
--- a/master/reg_request.c
+++ b/master/reg_request.c
@@ -89,24 +89,26 @@ ec_request_state_t ecrt_reg_request_state(const ec_reg_request_t *reg)
 
 /****************************************************************************/
 
-void ecrt_reg_request_write(ec_reg_request_t *reg, uint16_t address,
+int ecrt_reg_request_write(ec_reg_request_t *reg, uint16_t address,
         size_t size)
 {
     reg->dir = EC_DIR_OUTPUT;
     reg->address = address;
     reg->transfer_size = min(size, reg->mem_size);
     reg->state = EC_INT_REQUEST_QUEUED;
+    return 0;
 }
 
 /****************************************************************************/
 
-void ecrt_reg_request_read(ec_reg_request_t *reg, uint16_t address,
+int ecrt_reg_request_read(ec_reg_request_t *reg, uint16_t address,
         size_t size)
 {
     reg->dir = EC_DIR_INPUT;
     reg->address = address;
     reg->transfer_size = min(size, reg->mem_size);
     reg->state = EC_INT_REQUEST_QUEUED;
+    return 0;
 }
 
 /****************************************************************************/
diff --git a/master/rtdm.c b/master/rtdm.c
index f9a15b1c..0112e1a7 100644
--- a/master/rtdm.c
+++ b/master/rtdm.c
@@ -28,13 +28,15 @@
 
 #include
 #include
-#include
 
 #include "master.h"
 #include "ioctl.h"
 #include "rtdm.h"
 #include "rtdm_details.h"
 
+/* include last because it does some redefinitions */
+#include
+
 /** Set to 1 to enable device operations debugging.
*/ #define DEBUG 0 @@ -43,10 +45,11 @@ static int ec_rtdm_open(struct rtdm_dev_context *, rtdm_user_info_t *, int); static int ec_rtdm_close(struct rtdm_dev_context *, rtdm_user_info_t *); -static int ec_rtdm_ioctl_nrt_handler(struct rtdm_dev_context *, rtdm_user_info_t *, - unsigned int, void __user *); -static int ec_rtdm_ioctl_rt_handler(struct rtdm_dev_context *, rtdm_user_info_t *, - unsigned int, void __user *); +static int ec_rtdm_ioctl_nrt_handler(struct rtdm_dev_context *, + rtdm_user_info_t *, unsigned int, void __user *); +static int ec_rtdm_ioctl_rt_handler(struct rtdm_dev_context *, + rtdm_user_info_t *, unsigned int, void __user *); + /****************************************************************************/ /** Initialize an RTDM device. @@ -198,6 +201,8 @@ static int ec_rtdm_ioctl_nrt_handler( return ec_ioctl_rtdm_nrt(rtdm_dev->master, &ctx->ioctl_ctx, request, arg); } +/****************************************************************************/ + static int ec_rtdm_ioctl_rt_handler( struct rtdm_dev_context *context, /**< Context. */ rtdm_user_info_t *user_info, /**< User data. */ @@ -214,7 +219,8 @@ static int ec_rtdm_ioctl_rt_handler( " on RTDM device %s.\n", request, _IOC_NR(request), context->device->device_name); #endif - result = ec_ioctl_rtdm_rt(rtdm_dev->master, &ctx->ioctl_ctx, request, arg); + result = + ec_ioctl_rtdm_rt(rtdm_dev->master, &ctx->ioctl_ctx, request, arg); if (result == -ENOTTY) { /* Try again with nrt ioctl handler above in secondary mode. */ diff --git a/master/rtdm_details.h b/master/rtdm_details.h index 9bea0636..d57ce01e 100644 --- a/master/rtdm_details.h +++ b/master/rtdm_details.h @@ -16,13 +16,8 @@ * You should have received a copy of the GNU General Public License along * with the IgH EtherCAT master. If not, see . * - * The license mentioned above concerns the source code only. Using the - * EtherCAT technology and brand is only permitted in compliance with the - * industrial property and similar rights of Beckhoff Automation GmbH. - * ****************************************************************************/ - #ifndef __EC_RTDM_DETAILS_H__ #define __EC_RTDM_DETAILS_H__ @@ -60,5 +55,6 @@ static inline EC_RTDM_USERFD_T *ec_ioctl_to_rtdm(ec_ioctl_context_t *ctx) return container_of(ctx, ec_rtdm_context_t, ioctl_ctx)->user_fd; } +/****************************************************************************/ #endif // __EC_RTDM_DETAILS_H__ diff --git a/master/rtdm_xenomai_v3.c b/master/rtdm_xenomai_v3.c index 09a969c1..0cd3b577 100644 --- a/master/rtdm_xenomai_v3.c +++ b/master/rtdm_xenomai_v3.c @@ -1,10 +1,8 @@ /***************************************************************************** - * - * $Id$ * * Copyright (C) 2009-2010 Moehwald GmbH B. Benner * 2011 IgH Andreas Stewering-Bone - * 2012 Florian Pose + * 2012 Florian Pose * * This file is part of the IgH EtherCAT master. * @@ -20,10 +18,6 @@ * You should have received a copy of the GNU General Public License along * with the IgH EtherCAT master. If not, see . * - * The license mentioned above concerns the source code only. Using the - * EtherCAT technology and brand is only permitted in compliance with the - * industrial property and similar rights of Beckhoff Automation GmbH. 
- * ****************************************************************************/ /** \file @@ -43,6 +37,8 @@ */ #define DEBUG_RTDM 0 +/****************************************************************************/ + static int ec_rtdm_open(struct rtdm_fd *fd, int oflags) { struct ec_rtdm_context *ctx = rtdm_fd_to_private(fd); @@ -66,6 +62,8 @@ static int ec_rtdm_open(struct rtdm_fd *fd, int oflags) return 0; } +/****************************************************************************/ + static void ec_rtdm_close(struct rtdm_fd *fd) { struct ec_rtdm_context *ctx = rtdm_fd_to_private(fd); @@ -84,6 +82,8 @@ static void ec_rtdm_close(struct rtdm_fd *fd) #endif } +/****************************************************************************/ + static int ec_rtdm_ioctl_rt_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg) { @@ -92,7 +92,8 @@ static int ec_rtdm_ioctl_rt_handler(struct rtdm_fd *fd, unsigned int request, struct rtdm_device *dev = rtdm_fd_device(fd); ec_rtdm_dev_t *rtdm_dev = dev->device_data; - result = ec_ioctl_rtdm_rt(rtdm_dev->master, &ctx->ioctl_ctx, request, arg); + result = + ec_ioctl_rtdm_rt(rtdm_dev->master, &ctx->ioctl_ctx, request, arg); if (result == -ENOTTY) /* Try again with nrt ioctl handler below in secondary mode. */ @@ -101,6 +102,8 @@ static int ec_rtdm_ioctl_rt_handler(struct rtdm_fd *fd, unsigned int request, return result; } +/****************************************************************************/ + static int ec_rtdm_ioctl_nrt_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg) { @@ -111,12 +114,17 @@ static int ec_rtdm_ioctl_nrt_handler(struct rtdm_fd *fd, unsigned int request, return ec_ioctl_rtdm_nrt(rtdm_dev->master, &ctx->ioctl_ctx, request, arg); } +/****************************************************************************/ + static int ec_rtdm_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma) { - struct ec_rtdm_context *ctx = (struct ec_rtdm_context *) rtdm_fd_to_private(fd); + struct ec_rtdm_context *ctx = + (struct ec_rtdm_context *) rtdm_fd_to_private(fd); return rtdm_mmap_kmem(vma, (void *)ctx->ioctl_ctx.process_data); } +/****************************************************************************/ + static struct rtdm_driver ec_rtdm_driver = { .profile_info = RTDM_PROFILE_INFO(ec_rtdm, RTDM_CLASS_EXPERIMENTAL, @@ -134,6 +142,8 @@ static struct rtdm_driver ec_rtdm_driver = { }, }; +/****************************************************************************/ + int ec_rtdm_dev_init(ec_rtdm_dev_t *rtdm_dev, ec_master_t *master) { struct rtdm_device *dev; @@ -167,6 +177,8 @@ int ec_rtdm_dev_init(ec_rtdm_dev_t *rtdm_dev, ec_master_t *master) return 0; } +/****************************************************************************/ + void ec_rtdm_dev_clear(ec_rtdm_dev_t *rtdm_dev) { rtdm_dev_unregister(rtdm_dev->dev); @@ -176,3 +188,5 @@ void ec_rtdm_dev_clear(ec_rtdm_dev_t *rtdm_dev) kfree(rtdm_dev->dev); } + +/****************************************************************************/ diff --git a/master/sdo_request.c b/master/sdo_request.c index d0de57db..040c6b5d 100644 --- a/master/sdo_request.c +++ b/master/sdo_request.c @@ -178,18 +178,20 @@ int ec_sdo_request_timed_out(const ec_sdo_request_t *req /**< SDO request. */) * Application interface. 
****************************************************************************/ -void ecrt_sdo_request_index(ec_sdo_request_t *req, uint16_t index, +int ecrt_sdo_request_index(ec_sdo_request_t *req, uint16_t index, uint8_t subindex) { req->index = index; req->subindex = subindex; + return 0; } /****************************************************************************/ -void ecrt_sdo_request_timeout(ec_sdo_request_t *req, uint32_t timeout) +int ecrt_sdo_request_timeout(ec_sdo_request_t *req, uint32_t timeout) { req->issue_timeout = timeout; + return 0; } /****************************************************************************/ @@ -215,24 +217,26 @@ ec_request_state_t ecrt_sdo_request_state(const ec_sdo_request_t *req) /****************************************************************************/ -void ecrt_sdo_request_read(ec_sdo_request_t *req) +int ecrt_sdo_request_read(ec_sdo_request_t *req) { req->dir = EC_DIR_INPUT; req->state = EC_INT_REQUEST_QUEUED; req->errno = 0; req->abort_code = 0x00000000; req->jiffies_start = jiffies; + return 0; } /****************************************************************************/ -void ecrt_sdo_request_write(ec_sdo_request_t *req) +int ecrt_sdo_request_write(ec_sdo_request_t *req) { req->dir = EC_DIR_OUTPUT; req->state = EC_INT_REQUEST_QUEUED; req->errno = 0; req->abort_code = 0x00000000; req->jiffies_start = jiffies; + return 0; } /****************************************************************************/ diff --git a/master/slave_config.c b/master/slave_config.c index c0c813e1..cf94f19a 100644 --- a/master/slave_config.c +++ b/master/slave_config.c @@ -656,7 +656,7 @@ int ecrt_slave_config_sync_manager(ec_slave_config_t *sc, uint8_t sync_index, /****************************************************************************/ -void ecrt_slave_config_watchdog(ec_slave_config_t *sc, +int ecrt_slave_config_watchdog(ec_slave_config_t *sc, uint16_t divider, uint16_t intervals) { EC_CONFIG_DBG(sc, 1, "%s(sc = 0x%p, divider = %u, intervals = %u)\n", @@ -664,6 +664,7 @@ void ecrt_slave_config_watchdog(ec_slave_config_t *sc, sc->watchdog_divider = divider; sc->watchdog_intervals = intervals; + return 0; } /****************************************************************************/ @@ -698,7 +699,7 @@ int ecrt_slave_config_pdo_assign_add(ec_slave_config_t *sc, /****************************************************************************/ -void ecrt_slave_config_pdo_assign_clear(ec_slave_config_t *sc, +int ecrt_slave_config_pdo_assign_clear(ec_slave_config_t *sc, uint8_t sync_index) { EC_CONFIG_DBG(sc, 1, "%s(sc = 0x%p, sync_index = %u)\n", @@ -706,12 +707,13 @@ void ecrt_slave_config_pdo_assign_clear(ec_slave_config_t *sc, if (sync_index >= EC_MAX_SYNC_MANAGERS) { EC_CONFIG_ERR(sc, "Invalid sync manager index %u!\n", sync_index); - return; + return -EINVAL; } down(&sc->master->master_sem); ec_pdo_list_clear_pdos(&sc->sync_configs[sync_index].pdos); up(&sc->master->master_sem); + return 0; } /****************************************************************************/ @@ -753,7 +755,7 @@ int ecrt_slave_config_pdo_mapping_add(ec_slave_config_t *sc, /****************************************************************************/ -void ecrt_slave_config_pdo_mapping_clear(ec_slave_config_t *sc, +int ecrt_slave_config_pdo_mapping_clear(ec_slave_config_t *sc, uint16_t pdo_index) { uint8_t sync_index; @@ -774,6 +776,7 @@ void ecrt_slave_config_pdo_mapping_clear(ec_slave_config_t *sc, } else { EC_CONFIG_WARN(sc, "PDO 0x%04X is not assigned.\n", pdo_index); } 
+ return 0; } /****************************************************************************/ @@ -965,7 +968,7 @@ int ecrt_slave_config_reg_pdo_entry_pos( /****************************************************************************/ -void ecrt_slave_config_dc(ec_slave_config_t *sc, uint16_t assign_activate, +int ecrt_slave_config_dc(ec_slave_config_t *sc, uint16_t assign_activate, uint32_t sync0_cycle_time, int32_t sync0_shift_time, uint32_t sync1_cycle_time, int32_t sync1_shift_time) { @@ -980,6 +983,7 @@ void ecrt_slave_config_dc(ec_slave_config_t *sc, uint16_t assign_activate, sc->dc_sync[0].shift_time = sync0_shift_time; sc->dc_sync[1].cycle_time = sync1_cycle_time; sc->dc_sync[1].shift_time = sync1_shift_time; + return 0; } /****************************************************************************/ @@ -1326,19 +1330,21 @@ ec_voe_handler_t *ecrt_slave_config_create_voe_handler( /****************************************************************************/ -void ecrt_slave_config_state(const ec_slave_config_t *sc, +int ecrt_slave_config_state(const ec_slave_config_t *sc, ec_slave_config_state_t *state) { - state->online = sc->slave ? 1 : 0; + const ec_slave_t *slave = sc->slave; + + state->online = slave ? 1 : 0; if (state->online) { state->operational = - sc->slave->current_state == EC_SLAVE_STATE_OP - && !sc->slave->force_config; - state->al_state = sc->slave->current_state; + slave->current_state == EC_SLAVE_STATE_OP && !slave->force_config; + state->al_state = slave->current_state; } else { state->operational = 0; state->al_state = EC_SLAVE_STATE_UNKNOWN; } + return 0; } /****************************************************************************/ diff --git a/master/soe_request.c b/master/soe_request.c index 20aa231a..ef90124b 100644 --- a/master/soe_request.c +++ b/master/soe_request.c @@ -223,7 +223,7 @@ int ec_soe_request_append_data( /** Request a read operation. */ -void ec_soe_request_read( +int ec_soe_request_read( ec_soe_request_t *req /**< SoE request. */ ) { @@ -231,13 +231,14 @@ void ec_soe_request_read( req->state = EC_INT_REQUEST_QUEUED; req->error_code = 0x0000; req->jiffies_start = jiffies; + return 0; } /****************************************************************************/ /** Request a write operation. */ -void ec_soe_request_write( +int ec_soe_request_write( ec_soe_request_t *req /**< SoE request. */ ) { @@ -245,6 +246,7 @@ void ec_soe_request_write( req->state = EC_INT_REQUEST_QUEUED; req->error_code = 0x0000; req->jiffies_start = jiffies; + return 0; } /****************************************************************************/ @@ -263,18 +265,20 @@ int ec_soe_request_timed_out(const ec_soe_request_t *req /**< SDO request. */) * Application interface. 
****************************************************************************/ -void ecrt_soe_request_idn(ec_soe_request_t *req, uint8_t drive_no, +int ecrt_soe_request_idn(ec_soe_request_t *req, uint8_t drive_no, uint16_t idn) { req->drive_no = drive_no; req->idn = idn; + return 0; } /****************************************************************************/ -void ecrt_soe_request_timeout(ec_soe_request_t *req, uint32_t timeout) +int ecrt_soe_request_timeout(ec_soe_request_t *req, uint32_t timeout) { req->issue_timeout = timeout; + return 0; } /****************************************************************************/ @@ -293,23 +297,23 @@ size_t ecrt_soe_request_data_size(const ec_soe_request_t *req) /****************************************************************************/ -ec_request_state_t ecrt_soe_request_state(ec_soe_request_t *req) +ec_request_state_t ecrt_soe_request_state(const ec_soe_request_t *req) { return ec_request_state_translation_table[req->state]; } /****************************************************************************/ -void ecrt_soe_request_read(ec_soe_request_t *req) +int ecrt_soe_request_read(ec_soe_request_t *req) { - ec_soe_request_read(req); + return ec_soe_request_read(req); } /****************************************************************************/ -void ecrt_soe_request_write(ec_soe_request_t *req) +int ecrt_soe_request_write(ec_soe_request_t *req) { - ec_soe_request_write(req); + return ec_soe_request_write(req); } /****************************************************************************/ diff --git a/master/soe_request.h b/master/soe_request.h index 67422ff7..ced4c9a1 100644 --- a/master/soe_request.h +++ b/master/soe_request.h @@ -67,8 +67,8 @@ void ec_soe_request_set_idn(ec_soe_request_t *, uint16_t); int ec_soe_request_alloc(ec_soe_request_t *, size_t); int ec_soe_request_copy_data(ec_soe_request_t *, const uint8_t *, size_t); int ec_soe_request_append_data(ec_soe_request_t *, const uint8_t *, size_t); -void ec_soe_request_read(ec_soe_request_t *); -void ec_soe_request_write(ec_soe_request_t *); +int ec_soe_request_read(ec_soe_request_t *); +int ec_soe_request_write(ec_soe_request_t *); int ec_soe_request_timed_out(const ec_soe_request_t *); /****************************************************************************/ diff --git a/master/voe_handler.c b/master/voe_handler.c index bc09069e..fc8cf307 100644 --- a/master/voe_handler.c +++ b/master/voe_handler.c @@ -112,16 +112,17 @@ size_t ec_voe_handler_mem_size( * Application interface. 
 ****************************************************************************/
 
-void ecrt_voe_handler_send_header(ec_voe_handler_t *voe, uint32_t vendor_id,
+int ecrt_voe_handler_send_header(ec_voe_handler_t *voe, uint32_t vendor_id,
         uint16_t vendor_type)
 {
     voe->vendor_id = vendor_id;
     voe->vendor_type = vendor_type;
+    return 0;
 }
 
 /****************************************************************************/
 
-void ecrt_voe_handler_received_header(const ec_voe_handler_t *voe,
+int ecrt_voe_handler_received_header(const ec_voe_handler_t *voe,
         uint32_t *vendor_id, uint16_t *vendor_type)
 {
     uint8_t *header = voe->datagram.data + EC_MBOX_HEADER_SIZE;
@@ -130,6 +131,7 @@ void ecrt_voe_handler_received_header(const ec_voe_handler_t *voe,
         *vendor_id = EC_READ_U32(header);
     if (vendor_type)
         *vendor_type = EC_READ_U16(header + 4);
+    return 0;
 }
 
 /****************************************************************************/
@@ -148,30 +150,33 @@ size_t ecrt_voe_handler_data_size(const ec_voe_handler_t *voe)
 
 /****************************************************************************/
 
-void ecrt_voe_handler_read(ec_voe_handler_t *voe)
+int ecrt_voe_handler_read(ec_voe_handler_t *voe)
 {
     voe->dir = EC_DIR_INPUT;
     voe->state = ec_voe_handler_state_read_start;
     voe->request_state = EC_INT_REQUEST_BUSY;
+    return 0;
 }
 
 /****************************************************************************/
 
-void ecrt_voe_handler_read_nosync(ec_voe_handler_t *voe)
+int ecrt_voe_handler_read_nosync(ec_voe_handler_t *voe)
 {
     voe->dir = EC_DIR_INPUT;
    voe->state = ec_voe_handler_state_read_nosync_start;
     voe->request_state = EC_INT_REQUEST_BUSY;
+    return 0;
 }
 
 /****************************************************************************/
 
-void ecrt_voe_handler_write(ec_voe_handler_t *voe, size_t size)
+int ecrt_voe_handler_write(ec_voe_handler_t *voe, size_t size)
 {
     voe->dir = EC_DIR_OUTPUT;
     voe->data_size = size;
     voe->state = ec_voe_handler_state_write_start;
     voe->request_state = EC_INT_REQUEST_BUSY;
+    return 0;
 }
 
 /****************************************************************************/
diff --git a/tool/CommandCrc.cpp b/tool/CommandCrc.cpp
index eb06ed66..05df4b11 100644
--- a/tool/CommandCrc.cpp
+++ b/tool/CommandCrc.cpp
@@ -86,7 +86,7 @@ void CommandCrc::execute(const StringVector &args)
     }
 
     MasterDevice m(getSingleMasterIndex());
-    m.open(reset ? MasterDevice::ReadWrite : MasterDevice::Read);
+    m.open(MasterDevice::ReadWrite);
 
     ec_ioctl_master_t master;
     m.getMaster(&master);
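
Editor's note: the hunks above change a number of ecrt_master_*() calls from void to int so that applications can detect failures; for example, ecrt_master_sync_reference_clock() and ecrt_master_sync_slave_clocks() now report -ENXIO when no DC reference clock is configured. The following is a minimal user-space sketch of a cyclic task consuming those return values. Only the ecrt_* calls and the ec_master_state_t.link_up field come from the patch; the task structure, timing source, and error handling are illustrative assumptions. Existing applications that ignore the return values keep compiling, since only the return type changed.

/* Illustrative sketch only: shows how an application might check the int
 * return values introduced above. Cycle handling is an assumption. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <time.h>
#include <ecrt.h>

static uint64_t system_time_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (uint64_t) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

void cyclic_task(ec_master_t *master)
{
    ec_master_state_t ms;
    int ret;

    ret = ecrt_master_receive(master); /* now returns int */
    if (ret)
        fprintf(stderr, "receive failed: %d\n", ret);

    /* distribute the application time before queuing the DC datagrams */
    ecrt_master_application_time(master, system_time_ns());

    ret = ecrt_master_sync_reference_clock(master);
    if (ret == -ENXIO)
        fprintf(stderr, "no DC reference clock configured\n");
    ecrt_master_sync_slave_clocks(master);

    if (ecrt_master_state(master, &ms) == 0 && !ms.link_up)
        fprintf(stderr, "link is down\n");

    ret = ecrt_master_send(master);
    if (ret)
        fprintf(stderr, "send failed: %d\n", ret);
}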
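Editor's note: master.c above defers ecrt_master_reset() through an irq_work "kicker" (sc_reset_task_kicker) that schedules a regular work item (sc_reset_task), so the reset, which takes master_sem (a sleeping lock), can be requested from atomic context. The generic shape of that hand-off is sketched below as a self-contained example; every my_* name is hypothetical and only illustrates the mechanism, and a mutex stands in for master_sem. It is not code from the patch.

/* Sketch of the irq_work -> work_struct hand-off used above.
 * All names prefixed with my_ are hypothetical. */
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct my_dev {
    struct work_struct reset_work;     /* runs in process context */
    struct irq_work reset_work_kicker; /* safe to queue from hard IRQ/NMI */
    struct mutex lock;                 /* stands in for master_sem */
};

static void my_reset_work_fn(struct work_struct *work)
{
    struct my_dev *dev = container_of(work, struct my_dev, reset_work);

    mutex_lock(&dev->lock); /* sleeping is allowed here */
    /* ... perform the actual reset ... */
    mutex_unlock(&dev->lock);
}

static void my_reset_kicker(struct irq_work *work)
{
    struct my_dev *dev = container_of(work, struct my_dev, reset_work_kicker);

    schedule_work(&dev->reset_work); /* punt to the system workqueue */
}

static void my_dev_init(struct my_dev *dev)
{
    mutex_init(&dev->lock);
    INIT_WORK(&dev->reset_work, my_reset_work_fn);
    init_irq_work(&dev->reset_work_kicker, my_reset_kicker);
}

/* Request the reset from atomic context: */
static void my_request_reset(struct my_dev *dev)
{
    irq_work_queue(&dev->reset_work_kicker);
}

static void my_dev_teardown(struct my_dev *dev)
{
    irq_work_sync(&dev->reset_work_kicker); /* mirrors ec_master_clear() */
    cancel_work_sync(&dev->reset_work);
}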
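Editor's note: slave_config.c above gives the configuration helpers an int return as well; ecrt_slave_config_pdo_assign_clear(), for instance, now returns -EINVAL for an invalid sync manager index instead of only logging it. A short configuration-time sketch follows; the vendor/product IDs, watchdog values, and DC parameters are placeholders, and ecrt_master_slave_config() is the standard ecrt call for obtaining the slave configuration (it is not part of this patch).

/* Sketch only: VENDOR_ID, PRODUCT_CODE and the numeric parameters are
 * placeholders chosen for illustration. */
#include <ecrt.h>

#define VENDOR_ID    0x00000000 /* placeholder */
#define PRODUCT_CODE 0x00000000 /* placeholder */

int configure_slave(ec_master_t *master)
{
    ec_slave_config_t *sc;
    int ret;

    sc = ecrt_master_slave_config(master, 0, 0, VENDOR_ID, PRODUCT_CODE);
    if (!sc)
        return -1;

    ret = ecrt_slave_config_watchdog(sc, 2498, 1000); /* now returns int */
    if (ret)
        return ret;

    /* SYNC0 every 1 ms, no shift; SYNC1 unused */
    ret = ecrt_slave_config_dc(sc, 0x0300, 1000000, 0, 0, 0);
    if (ret)
        return ret;

    ret = ecrt_slave_config_pdo_assign_clear(sc, 2);
    if (ret) /* -EINVAL if the sync manager index is out of range */
        return ret;

    return 0;
}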