drivers/net/ethernet/atheros/alx/Makefile | 3 drivers/net/ethernet/atheros/alx/alx.h | 213 + drivers/net/ethernet/atheros/alx/alx_ethtool.c | 1802 +++++++++++++++ drivers/net/ethernet/atheros/alx/alx_hw.c | 1461 ++++++++++++ drivers/net/ethernet/atheros/alx/alx_hw.h | 661 +++++ drivers/net/ethernet/atheros/alx/alx_main.c | 2820 +++++++++++++++++++++++++ drivers/net/ethernet/atheros/alx/alx_reg.h | 2295 ++++++++++++++++++++ 7 files changed, 9255 insertions(+) diff -Nurp linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_ethtool.c linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_ethtool.c --- linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_ethtool.c 1970-01-01 02:00:00.000000000 +0200 +++ linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_ethtool.c 2013-05-05 13:51:31.000000000 +0300 @@ -0,0 +1,1802 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/pci.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mdio.h> +#include <linux/interrupt.h> +#include <asm/byteorder.h> + +#include "alx_reg.h" +#include "alx_hw.h" +#include "alx.h" + + +static int alx_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + if (ALX_CAP(hw, GIGA)) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + if (hw->adv_cfg & ADVERTISED_Autoneg) + ecmd->advertising |= hw->adv_cfg; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->autoneg = (hw->adv_cfg & ADVERTISED_Autoneg) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + ecmd->transceiver = XCVR_INTERNAL; + + if (hw->flowctrl & ALX_FC_ANEG && + hw->adv_cfg & ADVERTISED_Autoneg) { + if (hw->flowctrl & ALX_FC_RX) { + ecmd->advertising |= ADVERTISED_Pause; + if (!(hw->flowctrl & ALX_FC_TX)) + ecmd->advertising |= ADVERTISED_Asym_Pause; + } else if (hw->flowctrl & ALX_FC_TX) + ecmd->advertising |= ADVERTISED_Asym_Pause; + } + + if (hw->link_up) { + ethtool_cmd_speed_set(ecmd, hw->link_speed); + ecmd->duplex = hw->link_duplex == FULL_DUPLEX ? 
+ DUPLEX_FULL : DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + return 0; +} + +static int alx_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + u32 adv_cfg; + int err = 0; + + while (test_and_set_bit(ALX_FLAG_RESETING, &adpt->flags)) + msleep(20); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ecmd->advertising & ADVERTISED_1000baseT_Half) { + dev_warn(&adpt->pdev->dev, "1000M half is invalid\n"); + ALX_FLAG_CLEAR(adpt, RESETING); + return -EINVAL; + } + adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; + } else { + int speed = ethtool_cmd_speed(ecmd); + + switch (speed + ecmd->duplex) { + case SPEED_10 + DUPLEX_HALF: + adv_cfg = ADVERTISED_10baseT_Half; + break; + case SPEED_10 + DUPLEX_FULL: + adv_cfg = ADVERTISED_10baseT_Full; + break; + case SPEED_100 + DUPLEX_HALF: + adv_cfg = ADVERTISED_100baseT_Half; + break; + case SPEED_100 + DUPLEX_FULL: + adv_cfg = ADVERTISED_100baseT_Full; + break; + default: + err = -EINVAL; + break; + } + } + + if (!err) { + hw->adv_cfg = adv_cfg; + err = alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl); + if (err) { + dev_warn(&adpt->pdev->dev, + "config PHY speed/duplex failed,err=%d\n", + err); + err = -EIO; + } + } + + ALX_FLAG_CLEAR(adpt, RESETING); + + return err; +} + +static void alx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + + if (hw->flowctrl & ALX_FC_ANEG && + hw->adv_cfg & ADVERTISED_Autoneg) + pause->autoneg = AUTONEG_ENABLE; + else + pause->autoneg = AUTONEG_DISABLE; + + if (hw->flowctrl & ALX_FC_TX) + pause->tx_pause = 1; + else + pause->tx_pause = 0; + + if (hw->flowctrl & ALX_FC_RX) + pause->rx_pause = 1; + else + pause->rx_pause = 0; +} + + +static int alx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct 
alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + int err = 0; + bool reconfig_phy = false; + u8 fc = 0; + + if (pause->tx_pause) + fc |= ALX_FC_TX; + if (pause->rx_pause) + fc |= ALX_FC_RX; + if (pause->autoneg) + fc |= ALX_FC_ANEG; + + while (test_and_set_bit(ALX_FLAG_RESETING, &adpt->flags)) + msleep(20); + + /* restart auto-neg for auto-mode */ + if (hw->adv_cfg & ADVERTISED_Autoneg) { + if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG)) + reconfig_phy = true; + if (fc & hw->flowctrl & ALX_FC_ANEG && + (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + reconfig_phy = true; + } + + if (reconfig_phy) { + err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); + if (err) { + dev_warn(&adpt->pdev->dev, + "config PHY flow control failed,err=%d\n", + err); + err = -EIO; + } + } + + /* flow control on mac */ + if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + alx_cfg_mac_fc(hw, fc); + + hw->flowctrl = fc; + + ALX_FLAG_CLEAR(adpt, RESETING); + + return err; +} + +static u32 alx_get_msglevel(struct net_device *netdev) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + + return adpt->msg_enable; +} + +static void alx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + + adpt->msg_enable = data; +} + +static const u32 hw_regs[] = { + ALX_DEV_CAP, ALX_DEV_CTRL, ALX_LNK_CAP, ALX_LNK_CTRL, + ALX_UE_SVRT, ALX_EFLD, ALX_SLD, ALX_PPHY_MISC1, + ALX_PPHY_MISC2, ALX_PDLL_TRNS1, + ALX_TLEXTN_STATS, ALX_EFUSE_CTRL, ALX_EFUSE_DATA, ALX_SPI_OP1, + ALX_SPI_OP2, ALX_SPI_OP3, ALX_EF_CTRL, ALX_EF_ADDR, + ALX_EF_DATA, ALX_SPI_ID, + ALX_SPI_CFG_START, ALX_PMCTRL, ALX_LTSSM_CTRL, ALX_MASTER, + ALX_MANU_TIMER, ALX_IRQ_MODU_TIMER, ALX_PHY_CTRL, ALX_MAC_STS, + ALX_MDIO, ALX_MDIO_EXTN, + ALX_PHY_STS, ALX_BIST0, ALX_BIST1, ALX_SERDES, + ALX_LED_CTRL, ALX_LED_PATN, ALX_LED_PATN2, ALX_SYSALV, + ALX_PCIERR_INST, ALX_LPI_DECISN_TIMER, + ALX_LPI_CTRL, ALX_LPI_WAIT, ALX_HRTBT_VLAN, ALX_HRTBT_CTRL, + ALX_RXPARSE, ALX_MAC_CTRL, 
ALX_GAP, ALX_STAD1, + ALX_LED_CTRL, ALX_HASH_TBL0, + ALX_HASH_TBL1, ALX_HALFD, ALX_DMA, ALX_WOL0, + ALX_WOL1, ALX_WOL2, ALX_WRR, ALX_HQTPD, + ALX_CPUMAP1, ALX_CPUMAP2, + ALX_MISC, ALX_RX_BASE_ADDR_HI, ALX_RFD_ADDR_LO, ALX_RFD_RING_SZ, + ALX_RFD_BUF_SZ, ALX_RRD_ADDR_LO, ALX_RRD_RING_SZ, + ALX_RFD_PIDX, ALX_RFD_CIDX, ALX_RXQ0, + ALX_RXQ1, ALX_RXQ2, ALX_RXQ3, ALX_TX_BASE_ADDR_HI, + ALX_TPD_PRI0_ADDR_LO, ALX_TPD_PRI1_ADDR_LO, + ALX_TPD_PRI2_ADDR_LO, ALX_TPD_PRI3_ADDR_LO, + ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX, + ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX, ALX_TPD_PRI0_CIDX, + ALX_TPD_PRI1_CIDX, ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX, + ALX_TPD_RING_SZ, ALX_TXQ0, ALX_TXQ1, ALX_TXQ2, + ALX_MSI_MAP_TBL1, ALX_MSI_MAP_TBL2, ALX_MSI_ID_MAP, + ALX_MSIX_MASK, ALX_MSIX_PENDING, + ALX_RSS_HASH_VAL, ALX_RSS_HASH_FLAG, ALX_RSS_BASE_CPU_NUM, +}; + +static int alx_get_regs_len(struct net_device *netdev) +{ + return (ARRAY_SIZE(hw_regs) + 0x20) * 4; +} + +static void alx_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *buff) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + u32 *p = buff; + int i; + + regs->version = (ALX_DID(hw) << 16) | (ALX_REVID(hw) << 8) | 1; + + memset(buff, 0, (ARRAY_SIZE(hw_regs) + 0x20) * 4); + + for (i = 0; i < ARRAY_SIZE(hw_regs); i++, p++) + ALX_MEM_R32(hw, hw_regs[i], p); + + /* last 0x20 for PHY register */ + for (i = 0; i < 0x20; i++) { + alx_read_phy_reg(hw, i, (u16 *)p); + p++; + } +} + +static void alx_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) + wol->wolopts |= WAKE_MAGIC; + if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) + wol->wolopts |= WAKE_PHY; + + netif_info(adpt, wol, adpt->netdev, + "wolopts = %x\n", + wol->wolopts); +} + +static int alx_set_wol(struct net_device *netdev, struct 
ethtool_wolinfo *wol) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + + hw->sleep_ctrl = 0; + + if (wol->wolopts & WAKE_MAGIC) + hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC; + if (wol->wolopts & WAKE_PHY) + hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY; + + netdev_info(adpt->netdev, "wol-ctrl=%X\n", hw->sleep_ctrl); + + device_set_wakeup_enable(&adpt->pdev->dev, hw->sleep_ctrl); + + return 0; +} + +static int alx_nway_reset(struct net_device *netdev) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + + if (netif_running(netdev)) + alx_reinit(adpt, false); + + return 0; +} + +static const char alx_gstrings_test[][ETH_GSTRING_LEN] = { + "register test (offline)", + "memory test (offline)", + "interrupt test (offline)", + "loopback test (offline)", + "link test (offline)" +}; +#define ALX_TEST_LEN (sizeof(alx_gstrings_test) / ETH_GSTRING_LEN) + +/* private flags */ +#define ALX_ETH_PF_LNK_10MH BIT(0) +#define ALX_ETH_PF_LNK_10MF BIT(1) +#define ALX_ETH_PF_LNK_100MH BIT(2) +#define ALX_ETH_PF_LNK_100MF BIT(3) +#define ALX_ETH_PF_LNK_1000MF BIT(4) +#define ALX_ETH_PF_LNK_MASK (\ + ALX_ETH_PF_LNK_10MH |\ + ALX_ETH_PF_LNK_10MF |\ + ALX_ETH_PF_LNK_100MH |\ + ALX_ETH_PF_LNK_100MF |\ + ALX_ETH_PF_LNK_1000MF) + +static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", + "rx_bcast_packets", + "rx_mcast_packets", + "rx_pause_packets", + "rx_ctrl_packets", + "rx_fcs_errors", + "rx_length_errors", + "rx_bytes", + "rx_runt_packets", + "rx_fragments", + "rx_64B_or_less_packets", + "rx_65B_to_127B_packets", + "rx_128B_to_255B_packets", + "rx_256B_to_511B_packets", + "rx_512B_to_1023B_packets", + "rx_1024B_to_1518B_packets", + "rx_1519B_to_mtu_packets", + "rx_oversize_packets", + "rx_rxf_ov_drop_packets", + "rx_rrd_ov_drop_packets", + "rx_align_errors", + "rx_bcast_bytes", + "rx_mcast_bytes", + "rx_address_errors", + 
"tx_packets", + "tx_bcast_packets", + "tx_mcast_packets", + "tx_pause_packets", + "tx_exc_defer_packets", + "tx_ctrl_packets", + "tx_defer_packets", + "tx_bytes", + "tx_64B_or_less_packets", + "tx_65B_to_127B_packets", + "tx_128B_to_255B_packets", + "tx_256B_to_511B_packets", + "tx_512B_to_1023B_packets", + "tx_1024B_to_1518B_packets", + "tx_1519B_to_mtu_packets", + "tx_single_collision", + "tx_multiple_collisions", + "tx_late_collision", + "tx_abort_collision", + "tx_underrun", + "tx_trd_eop", + "tx_length_errors", + "tx_trunc_packets", + "tx_bcast_bytes", + "tx_mcast_bytes", + "tx_update", +}; + +#define ALX_STATS_LEN (sizeof(alx_gstrings_stats) / ETH_GSTRING_LEN) + +static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(buf, &alx_gstrings_test, sizeof(alx_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats)); + break; + } +} + +static int alx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ALX_STATS_LEN; + case ETH_SS_TEST: + return ALX_TEST_LEN; + default: + return -ENOTSUPP; + } +} + +struct alx_reg_attr { + u16 reg; + u32 ro_mask; + u32 rw_mask; + u32 rc_mask; + u32 rst_val; + u8 rst_affect; +}; + +struct alx_reg_attr ar816x_regs_a[] = { + {0x1400, 0xffff80E0, 0x4D00, 0x0, 0x40020000, 0}, + {0x1404, 0x0, 0xffffffff, 0x0, 0x0, 1}, + {0x1408, 0x0, 0xffffffff, 0x0, 0x0, 1}, + {0x140c, 0xFFFF0000, 0x0, 0x0, 0xffff3800, 0}, + {0x1410, 0xffffffff, 0x0, 0x0, 0x0000, 0}, + {0x1414, 0x0, 0x0, 0x0, 0x0, 1}, + {0x141C, 0xfffffffe, 0x0, 0x0, 0x0, 1}, + {0x1420, 0xfffffffe, 0x0, 0x0, 0x0, 1}, + {0x1484, 0x0, 0x7f7f7f7f, 0x0, 0x60405060, 1}, + {0x1490, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1494, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1498, 0x0, 0xffff3ff, 0x0, 0x07a1f037, 1}, + {0x149C, 0xffff0000, 0xffff, 0x0, 0x600, 1}, + {0x14a0, 0x808078c0, 0x7f803f, 0x7f000700, 0x0, 1}, + {0x14a4, 0x0, 
0xFFFFFFFF, 0x0, 0x0, 1}, + {0x14a8, 0xFF000000, 0x00FFFFFF, 0x0, 0x0, 1}, + {0x1540, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1544, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1550, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1560, 0xFFFFF000, 0xfff, 0x0, 0x0, 0}, + {0x1564, 0xFFFF0000, 0xffff, 0x0, 0x0, 0}, + {0x1568, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1578, 0xFFFFF000, 0xfff, 0x0, 0x0, 0}, + {0x157C, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1580, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1584, 0xFFFF0000, 0xffff, 0x0, 0x0, 0}, + {0x1588, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1590, 0xFF00, 0xFFFF00DF, 0x0, 0x01000045, 1}, + {0x1594, 0xFFFFF800, 0x7FF, 0x0, 191, 1}, + {0x15A0, 0x200E0040, 0x5FF1FFBF, 0x0, 0x40810083, 1}, + {0x15A4, 0xFFFFF000, 0xFFF, 0x0, 0x1210, 1}, + {0x15A8, 0xF000F000, 0x0FFF0FFF, 0x0, 0x02E003C0, 1}, + {0x15AC, 0xF000, 0xFFFF0FFF, 0x0, 0x0100, 1}, + {0x15C4, 0xFF000000, 0xFFFFFF, 0x0, 0x0, 1}, + {0x15C8, 0xFFFF0000, 0xFFFF, 0x0, 0x0100, 1}, + {0x15E0, 0xFFFFF000, 0xFFF, 0x0, 0x0, 1}, + {0x15F0, 0x0, 0xFFFFFFFF, 0x0, 0x0, 1}, + {0x15F4, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x15F8, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x15FC, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x1700, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1704, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1708, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x170c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1710, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1714, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1718, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x171c, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1720, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1724, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1728, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x172c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1730, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1734, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1738, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x173c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1740, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1744, 0xffffffff, 0x0, 0xffffff, 
0x0, 1}, + {0x1748, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x174c, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x1750, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1754, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1758, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x175c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1760, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1764, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1768, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x176c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1770, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x1774, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1778, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x177c, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1780, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1784, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1788, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x178c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1790, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1794, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1798, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x179c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a0, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a4, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a8, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17ac, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b0, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b4, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b8, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17bc, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x17c0, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0xffff, 0, 0, 0, 0, 0}, +}; + +struct alx_reg_attr ar816x_regs_b[] = { + {0x1400, 0xffff80E0, 0x4D00, 0x0, 0x40020000, 0}, + {0x1404, 0x0, 0xffffffff, 0x0, 0x0, 1}, + {0x1408, 0x0, 0xffffffff, 0x0, 0x0, 1}, + {0x140c, 0xFFFF0000, 0x0, 0x0, 0xffff3800, 0}, + {0x1410, 0xffffffff, 0x0, 0x0, 0x0000, 0}, + {0x1414, 0x0, 0x0, 0x0, 0x0, 1}, + {0x141C, 0xfffffffe, 0x0, 0x0, 0x0, 1}, + {0x1420, 0xfffffffe, 0x0, 0x0, 0x0, 1}, + {0x1484, 0x0, 0x7f7f7f7f, 0x0, 0x60405018, 1}, + {0x1490, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1494, 0x0, 
0xffffffff, 0x0, 0x0, 0}, + {0x1498, 0x0, 0xffff3ff, 0x0, 0x07a1f037, 1}, + {0x149C, 0xffff0000, 0xffff, 0x0, 0x600, 1}, + {0x14a0, 0x808078c0, 0x7f803f, 0x7f000700, 0x0, 1}, + {0x14a4, 0x0, 0xFFFFFFFF, 0x0, 0x0, 1}, + {0x14a8, 0xFF000000, 0x00FFFFFF, 0x0, 0x0, 1}, + {0x1540, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1544, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1550, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1560, 0xFFFFF000, 0xfff, 0x0, 0x0, 0}, + {0x1564, 0xFFFF0000, 0xffff, 0x0, 0x0, 0}, + {0x1568, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1578, 0xFFFFF000, 0xfff, 0x0, 0x0, 0}, + {0x157C, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1580, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1584, 0xFFFF0000, 0xffff, 0x0, 0x0, 0}, + {0x1588, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1590, 0xFF00, 0xFFFF00DF, 0x0, 0x01000045, 1}, + {0x1594, 0xFFFFF800, 0x7FF, 0x0, 191, 1}, + {0x15A0, 0x200E0040, 0x5FF1FFBF, 0x0, 0x40810083, 1}, + {0x15A4, 0xFFFFF000, 0xFFF, 0x0, 0x1210, 1}, + {0x15A8, 0xF000F000, 0x0FFF0FFF, 0x0, 0x02E003C0, 1}, + {0x15AC, 0xF000, 0xFFFF0FFF, 0x0, 0x0100, 1}, + {0x15C4, 0xFF000000, 0xFFFFFF, 0x0, 0x0, 1}, + {0x15C8, 0xFFFF0000, 0xFFFF, 0x0, 0x0100, 1}, + {0x15E0, 0xFFFFF000, 0xFFF, 0x0, 0x0, 1}, + {0x15F0, 0x0, 0xFFFFFFFF, 0x0, 0x0, 1}, + {0x15F4, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x15F8, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x15FC, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x1700, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1704, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1708, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x170c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1710, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1714, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1718, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x171c, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1720, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1724, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1728, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x172c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1730, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1734, 0xffffffff, 0x0, 
0xffffff, 0x0, 1}, + {0x1738, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x173c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1740, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1744, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1748, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x174c, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x1750, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1754, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1758, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x175c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1760, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1764, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1768, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x176c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1770, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x1774, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1778, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x177c, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1780, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1784, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1788, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x178c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1790, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1794, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1798, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x179c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a0, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a4, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a8, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17ac, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b0, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b4, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b8, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17bc, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x17c0, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0xffff, 0, 0, 0, 0, 0}, +}; + +struct alx_reg_attr ar816x_regs_c[] = { + {0x1400, 0xffff80E0, 0x4D00, 0x0, 0x40020000, 0}, + {0x1404, 0x0, 0xffffffff, 0x0, 0x0, 1}, + {0x1408, 0x0, 0xffffffff, 0x0, 0x0, 1}, + {0x140c, 0xFFFF0000, 0x0, 0x0, 0xffff3800, 0}, + {0x1410, 0xffffffff, 0x0, 0x0, 0x0000, 0}, + {0x1414, 0x0, 0x0, 0x0, 0x0, 1}, 
+ {0x141C, 0xfffffffe, 0x0, 0x0, 0x0, 1}, + {0x1420, 0xfffffffe, 0x0, 0x0, 0x0, 1}, + {0x1484, 0x0, 0x7f7f7f7f, 0x0, 0x60405018, 1}, + {0x1490, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1494, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1498, 0x0, 0xffff3ff, 0x0, 0x07a1f037, 1}, + {0x149C, 0xffff0000, 0xffff, 0x0, 0x600, 1}, + {0x14a0, 0x808078c0, 0x7f803f, 0x7f000700, 0x0, 1}, + {0x14a4, 0x0, 0xFFFFFFFF, 0x0, 0x0, 1}, + {0x14a8, 0xFF000000, 0x00FFFFFF, 0x0, 0x0, 1}, + {0x1540, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1544, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1550, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1560, 0xFFFFF000, 0xfff, 0x0, 0x0, 0}, + {0x1564, 0xFFFF0000, 0xffff, 0x0, 0x0, 0}, + {0x1568, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1578, 0xFFFFF000, 0xfff, 0x0, 0x0, 0}, + {0x157C, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1580, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1584, 0xFFFF0000, 0xffff, 0x0, 0x0, 0}, + {0x1588, 0x0, 0xffffffff, 0x0, 0x0, 0}, + {0x1590, 0xFF00, 0xFFFF00DF, 0x0, 0x01000045, 1}, + {0x1594, 0xFFFFF800, 0x7FF, 0x0, 191, 1}, + {0x15A0, 0x200E0040, 0x5FF1FFBF, 0x0, 0x40810083, 1}, + {0x15A4, 0xFFFFF000, 0xFFF, 0x0, 0x1210, 1}, + {0x15A8, 0xF000F000, 0x0FFF0FFF, 0x0, 0x02E009C0, 1}, + {0x15AC, 0xF000, 0xFFFF0FFF, 0x0, 0x0100, 1}, + {0x15C4, 0xFF000000, 0xFFFFFF, 0x0, 0x0, 1}, + {0x15C8, 0xFFFF0000, 0xFFFF, 0x0, 0x0100, 1}, + {0x15E0, 0xFFFFF000, 0xFFF, 0x0, 0x0, 1}, + {0x15F0, 0x0, 0xFFFFFFFF, 0x0, 0x0, 1}, + {0x15F4, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x15F8, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x15FC, 0xFFFFFFFF, 0x0, 0x0, 0x0, 1}, + {0x1700, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1704, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1708, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x170c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1710, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1714, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1718, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x171c, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1720, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1724, 0xffffffff, 0x0, 
0xffffffff, 0x0, 1}, + {0x1728, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x172c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1730, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1734, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1738, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x173c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1740, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1744, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1748, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x174c, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x1750, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1754, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1758, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x175c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1760, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1764, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1768, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x176c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1770, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x1774, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1778, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x177c, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x1780, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1784, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1788, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x178c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1790, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1794, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x1798, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x179c, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a0, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a4, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17a8, 0xffffffff, 0x0, 0xffffff, 0x0, 1}, + {0x17ac, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b0, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b4, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17b8, 0xffffffff, 0x0, 0xffff, 0x0, 1}, + {0x17bc, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0x17c0, 0xffffffff, 0x0, 0xffffffff, 0x0, 1}, + {0xffff, 0, 0, 0, 0, 0}, +}; + +static int alx_diag_register(struct alx_adapter *adpt, u64 *data) +{ + struct alx_hw *hw = &adpt->hw; + u8 rev = 
ALX_REVID(hw); + struct alx_reg_attr *preg, *oreg; + u32 val, old; + + switch (ALX_DID(hw)) { + case ALX_DEV_ID_AR8161: + case ALX_DEV_ID_AR8162: + case ALX_DEV_ID_AR8171: + case ALX_DEV_ID_AR8172: + if (rev == ALX_REV_B0) + oreg = ar816x_regs_b; + else if (rev == ALX_REV_C0) + oreg = ar816x_regs_c; + else + oreg = ar816x_regs_a; + break; + default: + /* unknow type */ + *data = 1; + return -EIO; + } + + /* issue a MAC-reset */ + ALX_MEM_W32(hw, ALX_MASTER, ALX_MASTER_DMA_MAC_RST); + msleep(50); + + /* check reset value */ + preg = oreg; + while (preg->reg != 0xffff) { + if (preg->rst_affect) { + ALX_MEM_R32(hw, preg->reg, &val); + if (val != preg->rst_val) { + netif_err(adpt, hw, adpt->netdev, + "register %X, hard-rst:%X, read-val:%X\n", + preg->reg, preg->rst_val, val); + *data = 2; + return -EIO; + } + } + preg++; + } + + /* check read-clear/read-write attribute */ + preg = oreg; + + while (preg->reg != 0xffff) { + ALX_MEM_R32(hw, preg->reg, &old); + + /* read clear */ + if (preg->rc_mask) { + u32 v2; + + msleep(20); + ALX_MEM_R32(hw, preg->reg, &v2); + if ((v2 & preg->rc_mask) != 0) { + netif_err(adpt, hw, adpt->netdev, + "register %X, RC-mask:%X, Old:%X, New:%X\n", + preg->reg, preg->rc_mask, old, v2); + *data = 3; + return -EIO; + } + } + + /* read/write */ + ALX_MEM_W32(hw, preg->reg, 0xffffffff & preg->rw_mask); + ALX_MEM_FLUSH(hw); + ALX_MEM_R32(hw, preg->reg, &val); + if ((val & preg->rw_mask) != preg->rw_mask) { + netif_err(adpt, hw, adpt->netdev, + "register %X, RW-mask:%X, val-1:%X\n", + preg->reg, preg->rw_mask, val); + *data = 4; + return -EIO; + } + ALX_MEM_W32(hw, preg->reg, 0); + ALX_MEM_FLUSH(hw); + ALX_MEM_R32(hw, preg->reg, &val); + if ((val & preg->rw_mask) != 0) { + netif_err(adpt, hw, adpt->netdev, + "register %X, RW-mask:%X, val-0:%X\n", + preg->reg, preg->rw_mask, val); + *data = 4; + return -EIO; + } + + /* restore */ + ALX_MEM_W32(hw, preg->reg, old); + + preg++; + } + + return 0; +} + +static int alx_diag_sram(struct alx_adapter *adpt, 
u64 *data) +{ + struct alx_hw *hw = &adpt->hw; + u32 ret[2]; + int i, err; + + err = alx_reset_mac(hw); + if (err) { + netif_err(adpt, hw, adpt->netdev, "reset_mac fail %d\n", err); + *data = 1; + goto out; + } + /* issue bist command */ + ALX_MEM_W32(hw, ALX_BIST0, ALX_BIST0_START); + ALX_MEM_W32(hw, ALX_BIST1, ALX_BIST1_START); + + /* wait for 100ms */ + ret[1] = ret[0] = 0; + for (i = 0; i < 5; i++) { + msleep(20); + ALX_MEM_R32(hw, ALX_BIST0, &ret[0]); + ALX_MEM_R32(hw, ALX_BIST1, &ret[1]); + if (ret[0] & ALX_BIST0_START || ret[1] & ALX_BIST1_START) + continue; + else + break; + } + + for (i = 0; i < 2; i++) { + if (ret[i] & ALX_BIST0_START) { + netif_err(adpt, hw, adpt->netdev, + "sram(%d) bist not complete(%X)!\n", + i, ret[i]); + *data = 2; + err = -EIO; + goto out; + } + if (ret[i] & ALX_BIST0_FAIL) { + netif_err(adpt, hw, adpt->netdev, + "sram(%d) bist fail(%X)!\n", + i, ret[i]); + *data = 3; + err = -EIO; + goto out; + } + } +out: + return err; +} + +static int alx_diag_reset(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + int err; + + alx_reset_pcie(hw); + alx_reset_phy(hw, !hw->hib_patch); + err = alx_reset_mac(hw); + if (!err) + err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); + + if (err) { + netif_err(adpt, hw, adpt->netdev, "alx_diag_reset err %X\n", + err); + } + + return err; +} + +static int alx_diag_link(struct alx_adapter *adpt, u64 *data) +{ + struct alx_hw *hw = &adpt->hw; + u32 flags, ethadv; + u16 speed; + u8 fc; + int i, err; + + ethadv = ADVERTISED_Autoneg; + flags = adpt->eth_pflags & ALX_ETH_PF_LNK_MASK; + if (flags == 0) + flags = ALX_ETH_PF_LNK_MASK; + if (flags & ALX_ETH_PF_LNK_10MH) + ethadv |= ADVERTISED_10baseT_Half; + if (flags & ALX_ETH_PF_LNK_10MF) + ethadv |= ADVERTISED_10baseT_Full; + if (flags & ALX_ETH_PF_LNK_100MH) + ethadv |= ADVERTISED_100baseT_Half; + if (flags & ALX_ETH_PF_LNK_100MF) + ethadv |= ADVERTISED_100baseT_Full; + if (flags & ALX_ETH_PF_LNK_1000MF) + ethadv |= 
ADVERTISED_1000baseT_Full; + + fc = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; + + alx_reset_phy(hw, !hw->hib_patch); + err = alx_setup_speed_duplex(hw, ethadv, fc); + if (err) { + netif_err(adpt, hw, adpt->netdev, + "config PHY speed/duplex failed, adv=%X,err=%d\n", + ethadv, err); + *data = 1; + goto out; + } + + /* wait for linkup */ + for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { + bool link_up; + + msleep(100); + err = alx_get_phy_link(hw, &link_up, &speed); + if (err) { + netif_err(adpt, hw, adpt->netdev, + "get PHY speed/duplex failed,err=%d\n", + err); + *data = 2; + goto out; + } + if (link_up) + break; + } + if (i == ALX_MAX_SETUP_LNK_CYCLE) { + err = ALX_LINK_TIMEOUT; + netif_err(adpt, hw, adpt->netdev, + "get PHY speed/duplex timeout.\n"); + *data = 3; + goto out; + } + + netif_info(adpt, hw, adpt->netdev, "link:%s\n", speed_desc(speed)); + +out: + return err; +} + +static irqreturn_t alx_diag_msix_isr(int irq, void *data) +{ + struct alx_adapter *adpt = data; + struct alx_hw *hw = &adpt->hw; + u32 intr; + int i, vect = -1; + + for (i = 0; i < adpt->nr_vec; i++) + if (irq == adpt->msix_ent[i].vector) { + vect = i; + break; + } + if (vect == -1) { + netdev_err(adpt->netdev, "vector not found irq=%d\n", irq); + goto err_out; + } + if (adpt->eth_diag_vect != vect) { + netdev_err(adpt->netdev, "wrong msix interrupt, irq=%d\n", irq); + goto err_out; + } + + alx_mask_msix(hw, vect, true); + + ALX_MEM_R32(hw, ALX_ISR, &intr); + ALX_MEM_W32(hw, ALX_ISR, intr); + + alx_mask_msix(hw, vect, false); + + adpt->eth_diag_cnt++; + return IRQ_HANDLED; + +err_out: + + ALX_MEM_W32(hw, ALX_IMR, 0); + return IRQ_HANDLED; +} + +static irqreturn_t alx_diag_msi_isr(int irq, void *data) +{ + struct alx_adapter *adpt = data; + struct alx_hw *hw = &adpt->hw; + u32 intr; + + ALX_MEM_R32(hw, ALX_ISR, &intr); + ALX_MEM_W32(hw, ALX_ISR, intr | ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_ISR, 0); + + if (intr & ALX_ISR_MANU) + adpt->eth_diag_cnt++; + + return IRQ_HANDLED; +} + +static irqreturn_t 
alx_diag_intx_isr(int irq, void *data) +{ + struct alx_adapter *adpt = data; + struct alx_hw *hw = &adpt->hw; + u32 intr; + + /* read interrupt status */ + ALX_MEM_R32(hw, ALX_ISR, &intr); + if (intr & ALX_ISR_DIS) + return IRQ_NONE; + + ALX_MEM_W32(hw, ALX_ISR, intr | ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_ISR, 0); + + if (intr & ALX_ISR_MANU) + adpt->eth_diag_cnt++; + + return IRQ_HANDLED; +} + +static int alx_diag_msix(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + char irq_lbl[IFNAMSIZ]; + int test_vect, i = 0, err = 0; + u32 val; + + alx_init_intr(adpt); + if (!ALX_FLAG(adpt, USING_MSIX)) + goto out; + + ALX_MEM_W32(hw, ALX_MSI_RETRANS_TIMER, + FIELDX(ALX_MSI_RETRANS_TM, 100)); + ALX_MEM_W32(hw, ALX_MSI_ID_MAP, 0); + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_IMR, ALX_ISR_MANU); + + for (i = 0; i < adpt->nr_vec; i++) { + sprintf(irq_lbl, "%s-test-%d", adpt->netdev->name, i); + err = request_irq(adpt->msix_ent[i].vector, alx_diag_msix_isr, + 0, irq_lbl, adpt); + if (err) + goto out; + } + + for (test_vect = 0; test_vect < adpt->nr_vec; test_vect++) { + u32 tbl[2]; + u32 other_vec = test_vect + 1; + + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + + if (other_vec >= adpt->nr_vec) + other_vec = 0; + other_vec |= other_vec << 4; + other_vec |= other_vec << 8; + other_vec |= other_vec << 16; + tbl[1] = other_vec; + tbl[0] = (other_vec & 0x0FFFFFFF) | ((u32)test_vect << 28); + ALX_MEM_W32(hw, ALX_MSI_MAP_TBL1, tbl[0]); + ALX_MEM_W32(hw, ALX_MSI_MAP_TBL2, tbl[1]); + ALX_MEM_W32(hw, ALX_ISR, 0); + ALX_MEM_FLUSH(hw); + + adpt->eth_diag_vect = test_vect; + adpt->eth_diag_cnt = 0; + /* issue a manual interrupt */ + ALX_MEM_R32(hw, ALX_MASTER, &val); + val |= ALX_MASTER_MANU_INT; + ALX_MEM_W32(hw, ALX_MASTER, val); + + /* wait for 50ms */ + msleep(50); + if (adpt->eth_diag_cnt != 1) { + err = -EIO; + goto out; + } + } + +out: + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_IMR, 0); + for (i--; i >= 0; i--) { + 
synchronize_irq(adpt->msix_ent[i].vector); + free_irq(adpt->msix_ent[i].vector, adpt); + } + alx_disable_advanced_intr(adpt); + + return err; +} + + +static int alx_diag_msi(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + struct pci_dev *pdev = adpt->pdev; + unsigned long cap = hw->capability; + u32 val; + bool irq_installed = false; + int err = 0; + + ALX_CAP_CLEAR(hw, MSIX); + alx_init_intr(adpt); + if (!ALX_FLAG(adpt, USING_MSI)) + goto out; + + pci_disable_msix(pdev); + ALX_MEM_W32(hw, ALX_MSI_RETRANS_TIMER, + FIELDX(ALX_MSI_RETRANS_TM, 100) | ALX_MSI_MASK_SEL_LINE); + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_IMR, ALX_ISR_MANU); + err = request_irq(pdev->irq, alx_diag_msi_isr, 0, + adpt->netdev->name, adpt); + if (err) + goto out; + + irq_installed = true; + adpt->eth_diag_cnt = 0; + ALX_MEM_W32(hw, ALX_ISR, 0); + ALX_MEM_FLUSH(hw); + ALX_MEM_R32(hw, ALX_MASTER, &val); + val |= ALX_MASTER_MANU_INT; + ALX_MEM_W32(hw, ALX_MASTER, val); + + /* wait for 50ms */ + msleep(50); + if (adpt->eth_diag_cnt != 1) { + err = -EIO; + goto out; + } + +out: + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_IMR, 0); + if (irq_installed) { + synchronize_irq(pdev->irq); + free_irq(pdev->irq, adpt); + } + alx_disable_advanced_intr(adpt); + hw->capability = cap; + + return err; +} + +static int alx_diag_intx(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + struct pci_dev *pdev = adpt->pdev; + u32 val; + bool irq_installed = false; + int err = 0; + + pci_disable_msix(pdev); + pci_disable_msi(pdev); + ALX_MEM_W32(hw, ALX_MSI_RETRANS_TIMER, 0); + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_IMR, ALX_ISR_MANU); + err = request_irq(pdev->irq, alx_diag_intx_isr, IRQF_SHARED, + adpt->netdev->name, adpt); + if (err) + goto out; + + irq_installed = true; + adpt->eth_diag_cnt = 0; + ALX_MEM_W32(hw, ALX_ISR, 0); + ALX_MEM_FLUSH(hw); + ALX_MEM_R32(hw, ALX_MASTER, &val); + val |= ALX_MASTER_MANU_INT; + 
ALX_MEM_W32(hw, ALX_MASTER, val); + + /* wait for 50ms */ + msleep(50); + if (adpt->eth_diag_cnt != 1) { + err = -EIO; + goto out; + } + +out: + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_IMR, 0); + if (irq_installed) { + synchronize_irq(pdev->irq); + free_irq(pdev->irq, adpt); + } + alx_disable_advanced_intr(adpt); + + return err; +} + +static int alx_diag_interrupt(struct alx_adapter *adpt, u64 *data) +{ + int err; + + err = alx_diag_msix(adpt); + if (err) { + *data = 1; + goto out; + } + err = alx_diag_msi(adpt); + if (err) { + *data = 2; + goto out; + } + err = alx_diag_intx(adpt); + if (err) { + *data = 3; + goto out; + } + +out: + return err; +} + +static int alx_diag_pkt_len[] = {36, 512, 2048}; +static u8 alx_tso_pkt_hdr[] = { +0x08, 0x00, +0x45, 0x00, 0x00, 0x00, +0x00, 0x00, 0x40, 0x00, +0x40, 0x06, 0x00, 0x00, +0xc0, 0xa8, 0x64, 0x0A, +0xc0, 0xa8, 0x64, 0x0B, +0x0d, 0x00, 0xe0, 0x00, +0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x02, 0x00, +0x80, 0x10, 0x10, 0x00, +0x14, 0x09, 0x00, 0x00, +0x08, 0x0a, 0x11, 0x22, +0x33, 0x44, 0x55, 0x66, +0x77, 0x88, 0x01, 0x00, +}; +#define ALX_TSO_IP_HDR_LEN 20 +#define ALX_TSO_IP_OPT_LEN 0 +#define ALX_TSO_TCP_HDR_LEN 20 +#define ALX_TSO_TCP_OPT_LEN 12 + +static void alx_diag_build_pkt(struct sk_buff *skb, u8 *dest_addr, + int seq, bool lso) +{ + struct iphdr *iph; + struct tcphdr *tcph; + int offset; + u8 *pkt_data; + + pkt_data = skb->data; + memcpy(pkt_data, dest_addr, 6); + memset(pkt_data + 6, 0x0, 6); + offset = 2 * ETH_ALEN; + + if (lso) { + iph = (struct iphdr *)&pkt_data[ETH_HLEN]; + offset = ETH_HLEN + ALX_TSO_IP_HDR_LEN + ALX_TSO_IP_OPT_LEN; + tcph = (struct tcphdr *)&pkt_data[offset]; + memcpy(pkt_data + ETH_ALEN * 2, + alx_tso_pkt_hdr, sizeof(alx_tso_pkt_hdr)); + iph->tot_len = htons(skb->len - ETH_HLEN); + iph->check = 0; + tcph->check = ~csum_tcpudp_magic( + iph->saddr, + iph->daddr, + 0, IPPROTO_TCP, 0); + offset = ETH_ALEN * 2 + sizeof(alx_tso_pkt_hdr); + } else { + *(u16 
*)&pkt_data[offset] = htons((u16)(skb->len - offset)); + offset += 2; + } + + *(u32 *)&pkt_data[offset] = (u32)seq; + offset += 4; + *(u16 *)&pkt_data[offset] = (u16)skb->len; + for (offset += 2; offset < skb->len; offset++) + pkt_data[offset] = (u8)(offset & 0xFF); +} + +static void alx_diag_rss_shift_key(u8 *pkey, int nkey) +{ + int len = nkey; + int i; + u8 carry = 0; + + for (i = len - 1; i >= 0; i--) { + if (pkey[i] & 0x80) { + pkey[i] = (pkey[i] << 1) | carry; + carry = 1; + } else { + pkey[i] = (pkey[i] << 1) | carry; + carry = 0; + } + } +} + +static int alx_diag_pkt_to_rxq(struct alx_adapter *adpt, struct sk_buff *skb) +{ + struct alx_hw *hw = &adpt->hw; + struct iphdr *iph; + struct tcphdr *tcph; + int i, j, que; + u8 hash_input[12], key[40]; + u32 hash; + + if (!ALX_CAP(hw, RSS) || ntohs(ETH_P_IP) != skb->data[ETH_ALEN * 2]) + return 0; + + iph = (struct iphdr *)&skb->data[ETH_HLEN]; + i = ETH_HLEN + iph->ihl * 4; + tcph = (struct tcphdr *)&skb->data[i]; + *(u32 *)&hash_input[0] = ntohl(be32_to_cpu(iph->saddr)); + *(u32 *)&hash_input[4] = ntohl(be32_to_cpu(iph->daddr)); + *(u16 *)&hash_input[8] = ntohs(be16_to_cpu(tcph->source)); + *(u16 *)&hash_input[10] = ntohs(be16_to_cpu(tcph->dest)); + + /* calcu hash */ + hash = 0; + memcpy(key, hw->rss_key, 40); + for (i = 0; i < 12; i++) { + for (j = 7; j >= 0; j--) { + if (hash_input[i] & (u8)(1 << j)) + hash ^= swab32(*(u32 *)key); + alx_diag_rss_shift_key(key, 40); + } + } + hash &= (hw->rss_idt_size - 1); + i = (int)(hash >> 3); + j = (int)(hash & 7); + que = (int)(hw->rss_idt[i] >> (j * 4) & 0xF); + + return que; +} + +static int alx_diag_rx_pkt(struct alx_adapter *adpt, int rx_qidx, + struct sk_buff **ppskb) +{ + struct alx_rx_queue *rxq; + struct rrd_desc *rrd; + struct alx_buffer *rxb; + struct sk_buff *skb; + u16 length; + int qnum; + + rxq = alx_hw_rxq(adpt->qnapi[rx_qidx]->rxq); + + rrd = rxq->rrd_hdr + rxq->rrd_cidx; + if (!(rrd->word3 & (1 << RRD_UPDATED_SHIFT))) + return 1; + + rrd->word3 &= ~(1 << 
RRD_UPDATED_SHIFT); + if (unlikely(FIELD_GETX(rrd->word0, RRD_SI) != rxq->cidx || + FIELD_GETX(rrd->word0, RRD_NOR) != 1)) { + netif_err(adpt, rx_err, adpt->netdev, + "wrong SI/NOR packet! rrd->word0= %08x\n", + rrd->word0); + return -1; + } + rxb = rxq->bf_info + rxq->cidx; + dma_unmap_single(rxq->dev, + dma_unmap_addr(rxb, dma), + dma_unmap_len(rxb, size), + DMA_FROM_DEVICE); + dma_unmap_len_set(rxb, size, 0); + skb = rxb->skb; + rxb->skb = NULL; + + if (unlikely(rrd->word3 & (1 << RRD_ERR_RES_SHIFT) || + rrd->word3 & (1 << RRD_ERR_LEN_SHIFT))) { + netif_err(adpt, rx_err, adpt->netdev, + "wrong packet! rrd->word3 is %08x\n", + rrd->word3); + rrd->word3 = 0; + return -2; + } + length = FIELD_GETX(rrd->word3, RRD_PKTLEN) - ETH_FCS_LEN; + skb_put(skb, length); + switch (FIELD_GETX(rrd->word2, RRD_PID)) { + case RRD_PID_IPV6UDP: + case RRD_PID_IPV4UDP: + case RRD_PID_IPV4TCP: + case RRD_PID_IPV6TCP: + if (rrd->word3 & ((1 << RRD_ERR_L4_SHIFT) | + (1 << RRD_ERR_IPV4_SHIFT))) { + netif_err(adpt, rx_err, adpt->netdev, + "rx-chksum error, w2=%X\n", + rrd->word2); + return -3; + } + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } + qnum = FIELD_GETX(rrd->word2, RRD_RSSQ) % adpt->nr_rxq; + if (rx_qidx != qnum) { + netif_err(adpt, rx_err, adpt->netdev, + "rx Q-number is wrong (%d:%d), hash=%X,w2=%X\n", + rx_qidx, qnum, rrd->rss_hash, rrd->word2); + return -4; + } + + if (++rxq->cidx == rxq->count) + rxq->cidx = 0; + if (++rxq->rrd_cidx == rxq->count) + rxq->rrd_cidx = 0; + + alx_alloc_rxring_buf(adpt, rxq); + + *ppskb = skb; + return 0; +} + +static int alx_diag_cmp_pkts(struct sk_buff *tx_skb, struct sk_buff *rx_skb, + int mss, int seg_idx) +{ + int cmp_offs, cmp_len; + int hdr_len, tx_datalen; + + if (mss == 0) { + if (rx_skb->len < tx_skb->len || + memcmp(tx_skb->data, rx_skb->data, tx_skb->len)) + return -EIO; + return 0; + } + /* LSO */ + hdr_len = ETH_HLEN + ALX_TSO_IP_HDR_LEN + ALX_TSO_IP_OPT_LEN + + ALX_TSO_TCP_HDR_LEN + ALX_TSO_TCP_OPT_LEN; + tx_datalen = 
tx_skb->len - hdr_len; + cmp_offs = hdr_len + mss * seg_idx; + cmp_len = tx_skb->len - cmp_offs; + if (cmp_len > mss) + cmp_len = mss; + if (cmp_len > tx_skb->len) + return -EIO; + + return memcmp(&tx_skb->data[cmp_offs], + &rx_skb->data[hdr_len], + cmp_len); +} + +static int alx_diag_lpbk_run_packets(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + struct sk_buff *tx_skb, *rx_skb; + int i, j, pkt_len, max_len, mss; + struct alx_tx_queue *txq; + int q_idx, seg_idx, nr_pkts; + struct tpd_desc *tpd; + dma_addr_t dma; + int err; + + max_len = hw->mtu + ETH_HLEN; + for (i = 0; i < 1000; i++) { + pkt_len = alx_diag_pkt_len[i % ARRAY_SIZE(alx_diag_pkt_len)]; + mss = 0; + nr_pkts = 1; + if (pkt_len > max_len) { + mss = max_len - ETH_ALEN * 2 - sizeof(alx_tso_pkt_hdr); + if (mss < 0) { + mss = 0; + pkt_len = max_len; + } else { + nr_pkts = DIV_ROUND_UP(pkt_len - ETH_ALEN * 2 - + sizeof(alx_tso_pkt_hdr), mss); + } + } + tx_skb = netdev_alloc_skb(adpt->netdev, pkt_len); + if (!tx_skb) + return -ENOMEM; + skb_put(tx_skb, pkt_len); + alx_diag_build_pkt(tx_skb, hw->mac_addr, i, mss != 0); + dma = dma_map_single(&adpt->pdev->dev, tx_skb->data, pkt_len, + DMA_TO_DEVICE); + if (dma_mapping_error(&adpt->pdev->dev, dma)) { + dev_kfree_skb(tx_skb); + return -EIO; + } + + txq = adpt->qnapi[i % adpt->nr_txq]->txq; + tpd = txq->tpd_hdr + txq->pidx; + memset(tpd, 0, sizeof(struct tpd_desc)); + + if (mss) { + tpd->word1 |= 1 << TPD_IPV4_SHIFT; + tpd->word1 |= 1 << TPD_LSO_EN_SHIFT; + tpd->word1 |= FIELDX(TPD_MSS, mss); + tpd->word1 |= FIELDX(TPD_L4HDROFFSET, + ETH_HLEN + + ALX_TSO_IP_HDR_LEN + + ALX_TSO_IP_OPT_LEN); + } + tpd->adrl.addr = cpu_to_le64(dma); + FIELD_SET32(tpd->word0, TPD_BUFLEN, pkt_len); + tpd->word1 |= 1 << TPD_EOP_SHIFT; + + if (++txq->pidx == txq->count) + txq->pidx = 0; + wmb(); + ALX_MEM_W16(hw, txq->p_reg, txq->pidx); + + /* wait packet loopbacked. 
*/ + q_idx = alx_diag_pkt_to_rxq(adpt, tx_skb); + seg_idx = 0; + + for (j = 0; j < 500 && nr_pkts > 0; j++) { + udelay(100); + err = alx_diag_rx_pkt(adpt, q_idx, &rx_skb); + if (err > 0) + continue; + if (err < 0) { + err = -EIO; + goto out; + } + + /* got 1 packet/segment, compare */ + err = alx_diag_cmp_pkts(tx_skb, rx_skb, mss, seg_idx); + dev_kfree_skb(rx_skb); + if (err) { + err = -EIO; + goto out; + } + seg_idx++; + nr_pkts--; + } + if (err) { + err = -EIO; + goto out; + } + dev_kfree_skb(tx_skb); + } + + return 0; + +out: + dma_unmap_single(&adpt->pdev->dev, dma, tx_skb->len, DMA_TO_DEVICE); + dev_kfree_skb(tx_skb); + return err; +} + +enum ALX_LPBK_MODE { + ALX_LPBK_MAC = 0, + ALX_LPBK_PHY, +}; +static int alx_diag_speed[] = {SPEED_1000, SPEED_100, SPEED_10}; + +static int alx_diag_lpbk_init_hw(struct alx_adapter *adpt, int mode, int speed) +{ + struct alx_hw *hw = &adpt->hw; + int i, err; + u32 val; + + alx_reset_pcie(hw); + alx_reset_phy(hw, false); + err = alx_reset_mac(hw); + if (err) { + netif_err(adpt, hw, adpt->netdev, + "loopback: reset mac fail, err=%d\n", err); + goto err_hw; + } + hw->rx_ctrl |= ALX_MAC_CTRL_LPBACK_EN | + ALX_MAC_CTRL_DBG_EN; + + /* PHY configuration */ + if (hw->is_fpga) { + if (mode == ALX_LPBK_MAC) + goto cfg_hw; + netif_err(adpt, hw, adpt->netdev, + "loopback: FPGA not support PHY external lpbk!\n"); + goto err_hw; + } + err = alx_write_phy_reg(hw, 16, 0x0800); + if (err) { + netif_err(adpt, hw, adpt->netdev, + "loopback: fix channel, err=%d\n", err); + goto err_hw; + } + switch (speed) { + case SPEED_1000: + alx_write_phy_dbg(hw, 0x11, 0x5553); + alx_write_phy_reg(hw, MII_BMCR, 0x8140); + break; + case SPEED_100: + alx_write_phy_reg(hw, MII_BMCR, 0xA100); + break; + default: + alx_write_phy_reg(hw, MII_BMCR, 0x8100); + break; + } + msleep(100); + + if (mode == ALX_LPBK_PHY) { + u16 spd; + bool linkup; + + /* wait for link */ + for (i = 0; i < 50; i++) { + msleep(100); + err = alx_get_phy_link(hw, &linkup, &spd); + if (err) + 
goto err_hw; + if (linkup) + break; + } + if (!linkup) { + netif_err(adpt, hw, adpt->netdev, + "no link, check your External-Loopback-Connector !\n"); + goto err_hw; + } + hw->rx_ctrl &= ~ALX_MAC_CTRL_LPBACK_EN; + } + +cfg_hw: + alx_init_intr(adpt); + alx_init_def_rss_idt(adpt); + err = alx_setup_all_ring_resources(adpt); + if (err) + goto out; + alx_configure(adpt); + /* disable clk gate for loopback */ + ALX_MEM_W32(hw, ALX_CLK_GATE, 0); + /* disable PLL clk switch for loopback */ + ALX_MEM_R32(hw, ALX_PHY_CTRL, &val); + ALX_MEM_W32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_PLL_ON); + hw->link_duplex = FULL_DUPLEX; + hw->link_speed = speed; + hw->link_up = true; + alx_enable_aspm(hw, false, false); + alx_start_mac(hw); + goto out; +err_hw: + err = -EIO; +out: + return err; +} + +static void alx_diag_lpbk_deinit_hw(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + u32 val; + + hw->link_up = false; + hw->link_speed = SPEED_0; + alx_reset_mac(hw); + hw->rx_ctrl &= ~(ALX_MAC_CTRL_LPBACK_EN | ALX_MAC_CTRL_DBG_EN); + alx_enable_aspm(hw, false, false); + alx_free_all_ring_resources(adpt); + alx_disable_advanced_intr(adpt); + /* enable PLL clk switch for loopback */ + ALX_MEM_R32(hw, ALX_PHY_CTRL, &val); + ALX_MEM_W32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_PLL_ON); +} + +static int alx_diag_loopback(struct alx_adapter *adpt, u64 *data, bool phy_lpbk) +{ + struct alx_hw *hw = &adpt->hw; + int i, err; + + if (hw->is_fpga && phy_lpbk) + return 0; + + for (i = 0; i < ARRAY_SIZE(alx_diag_speed); i++) { + err = alx_diag_lpbk_init_hw( + adpt, + phy_lpbk ? 
ALX_LPBK_PHY : ALX_LPBK_MAC, + alx_diag_speed[i]); + if (err) { + *data = i + 1; + goto out; + } + err = alx_diag_lpbk_run_packets(adpt); + if (err) { + *data = i + 10; + goto out; + } + alx_diag_lpbk_deinit_hw(adpt); + } +out: + if (err) + alx_diag_lpbk_deinit_hw(adpt); + + return err; +} + +static void alx_self_test(struct net_device *netdev, + struct ethtool_test *etest, + u64 *data) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + bool if_running = netif_running(netdev); + bool phy_lpback = etest->flags & ETH_TEST_FL_EXTERNAL_LB; + + ALX_FLAG_SET(adpt, TESTING); + memset(data, 0, sizeof(u64) * ALX_TEST_LEN); + + if (if_running) + dev_close(netdev); + + if (etest->flags == ETH_TEST_FL_OFFLINE) { + netif_info(adpt, hw, adpt->netdev, "offline test start...\n"); + + if (alx_diag_register(adpt, &data[0])) + etest->flags |= ETH_TEST_FL_FAILED; + + if (alx_diag_sram(adpt, &data[1])) + etest->flags |= ETH_TEST_FL_FAILED; + + if (alx_diag_interrupt(adpt, &data[2])) + etest->flags |= ETH_TEST_FL_FAILED; + + if (phy_lpback) + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + if (alx_diag_loopback(adpt, &data[3], phy_lpback)) + etest->flags |= ETH_TEST_FL_FAILED; + + } else { + netif_info(adpt, hw, adpt->netdev, "online test start...\n"); + + if (alx_diag_link(adpt, &data[4])) + etest->flags |= ETH_TEST_FL_FAILED; + } + + ALX_FLAG_CLEAR(adpt, TESTING); + alx_diag_reset(adpt); + + if (if_running) + dev_open(netdev); +} + +static void alx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *estats, u64 *data) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + + spin_lock(&adpt->smb_lock); + + __alx_update_hw_stats(hw); + memcpy(data, &hw->stats, sizeof(hw->stats)); + + spin_unlock(&adpt->smb_lock); +} + +static u32 alx_get_priv_flags(struct net_device *netdev) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + + return adpt->eth_pflags; +} + +static int alx_set_priv_flags(struct net_device *netdev, u32 flags) +{ + 
struct alx_adapter *adpt = netdev_priv(netdev); + + adpt->eth_pflags = flags; + + return 0; +} + +static void alx_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + + strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, alx_drv_version, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adpt->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = ALX_STATS_LEN; + drvinfo->testinfo_len = ALX_TEST_LEN; + drvinfo->n_priv_flags = 5; + drvinfo->regdump_len = alx_get_regs_len(netdev); + drvinfo->eedump_len = 0; +} + +static const struct ethtool_ops alx_ethtool_ops = { + .get_settings = alx_get_settings, + .set_settings = alx_set_settings, + .get_pauseparam = alx_get_pauseparam, + .set_pauseparam = alx_set_pauseparam, + .get_drvinfo = alx_get_drvinfo, + .get_regs_len = alx_get_regs_len, + .get_regs = alx_get_regs, + .get_wol = alx_get_wol, + .set_wol = alx_set_wol, + .get_msglevel = alx_get_msglevel, + .set_msglevel = alx_set_msglevel, + .nway_reset = alx_nway_reset, + .get_link = ethtool_op_get_link, + .get_strings = alx_get_strings, + .get_sset_count = alx_get_sset_count, + .get_ethtool_stats = alx_get_ethtool_stats, + .self_test = alx_self_test, + .get_priv_flags = alx_get_priv_flags, + .set_priv_flags = alx_set_priv_flags, +}; + +void alx_set_ethtool_ops(struct net_device *dev) +{ + SET_ETHTOOL_OPS(dev, &alx_ethtool_ops); +} + diff -Nurp linux-3.8.11/drivers/net/ethernet/atheros/alx/alx.h linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx.h --- linux-3.8.11/drivers/net/ethernet/atheros/alx/alx.h 1970-01-01 02:00:00.000000000 +0200 +++ linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx.h 2013-05-05 13:51:31.000000000 +0300 @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _ALX_H_ +#define _ALX_H_ + +#define ALX_WATCHDOG_TIME (5 * HZ) + +/* alx_ring_header is a single, contiguous block of memory space + * used by the three descriptor rings (tpd, rfd, rrd) + */ +struct alx_ring_header { + /* virt addr */ + void *desc; + /* phy addr */ + dma_addr_t dma; + u32 size; +}; + +/* alx_buffer wraps around a pointer to a socket buffer + * so a DMA physical address can be stored along with the skb + */ +struct alx_buffer { + struct sk_buff *skb; + /* DMA address */ + DEFINE_DMA_UNMAP_ADDR(dma); + /* buffer size */ + DEFINE_DMA_UNMAP_LEN(size); + /* information of this buffer */ + u16 flags; +}; +#define ALX_BUF_TX_FIRSTFRAG 0x1 + +/* rx queue */ +struct alx_rx_queue { + struct net_device *netdev; + /* device pointer for dma operation */ + struct device *dev; + /* rrd ring virtual addr */ + struct rrd_desc *rrd_hdr; + /* rrd ring physical addr */ + dma_addr_t rrd_dma; + /* rfd ring virtual addr */ + struct rfd_desc *rfd_hdr; + /* rfd ring physical addr */ + dma_addr_t rfd_dma; + /* info for rx-skbs */ + struct alx_buffer *bf_info; + + /* number of ring elements */ + u16 count; + /* rfd producer index */ + u16 pidx; + /* rfd consumer index */ + u16 cidx; + u16 rrd_cidx; + /* register saving producer 
index */ + u16 p_reg; + /* register saving consumer index */ + u16 c_reg; + /* queue index */ + u16 qidx; + unsigned long flag; + + struct sk_buff_head list; +}; +#define ALX_RQ_USING 1 +#define ALX_RX_ALLOC_THRESH 32 + +/* tx queue */ +struct alx_tx_queue { + struct net_device *netdev; + /* device pointer for dma operation */ + struct device *dev; + /* tpd ring virtual addr */ + struct tpd_desc *tpd_hdr; + dma_addr_t tpd_dma; + /* info for tx-skbs pending on HW */ + struct alx_buffer *bf_info; + /* number of ring elements */ + u16 count; + /* producer index */ + u16 pidx; + /* consumer index */ + atomic_t cidx; + /* register saving producer index */ + u16 p_reg; + /* register saving consumer index */ + u16 c_reg; + /* queue index */ + u16 qidx; +}; + +#define ALX_TX_WAKEUP_THRESH(_tq) ((_tq)->count / 4) +#define ALX_DEFAULT_TX_WORK 128 + +struct alx_napi { + struct napi_struct napi; + struct alx_adapter *adpt; + struct alx_rx_queue *rxq; + struct alx_tx_queue *txq; + int vec_idx; + u32 vec_mask; + char irq_lbl[IFNAMSIZ]; +}; + +enum ALX_FLAGS { + ALX_FLAG_USING_MSIX = 0, + ALX_FLAG_USING_MSI, + ALX_FLAG_RESETING, + ALX_FLAG_TESTING, + ALX_FLAG_HALT, + ALX_FLAG_FPGA, + ALX_FLAG_TASK_PENDING, + ALX_FLAG_TASK_CHK_LINK, + ALX_FLAG_TASK_RESET, + ALX_FLAG_TASK_UPDATE_SMB, + + ALX_FLAG_NUMBER_OF_FLAGS, +}; + + +struct alx_hw; +/* + *board specific private data structure + */ +struct alx_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + + struct alx_hw hw; + + u16 bd_number; + + /* totally msix vectors */ + int nr_vec; + struct msix_entry *msix_ent; + + /* all descriptor memory */ + struct alx_ring_header ring_header; + int tx_ringsz; + int rx_ringsz; + int rxbuf_size; + + struct alx_napi *qnapi[8]; + /* number of napi for TX-Q */ + int nr_txq; + /* number of napi for RX-Q */ + int nr_rxq; + /* number independent hw RX-Q */ + int nr_hwrxq; + /* total napi for TX-Q/RX-Q */ + int nr_napi; + + /* lock for updating stats */ + spinlock_t smb_lock; + + struct 
work_struct task; + struct net_device_stats net_stats; + atomic_t irq_sem; + u16 msg_enable; + + unsigned long flags; + + /* ethtool private flags */ + u32 eth_pflags; + int eth_diag_vect; + int eth_diag_cnt; +}; + + +#define ALX_FLAG(_adpt, _FLAG) (\ + test_bit(ALX_FLAG_##_FLAG, &(_adpt)->flags)) +#define ALX_FLAG_SET(_adpt, _FLAG) (\ + set_bit(ALX_FLAG_##_FLAG, &(_adpt)->flags)) +#define ALX_FLAG_CLEAR(_adpt, _FLAG) (\ + clear_bit(ALX_FLAG_##_FLAG, &(_adpt)->flags)) + +static inline struct alx_rx_queue *alx_hw_rxq(struct alx_rx_queue *rxq) +{ + struct alx_adapter *adpt = netdev_priv(rxq->netdev); + + return ALX_CAP(&adpt->hw, MRQ) ? rxq : adpt->qnapi[0]->rxq; +} + +/* needed by alx_ethtool.c */ +extern void alx_configure(struct alx_adapter *adpt); +extern void alx_free_all_ring_resources(struct alx_adapter *adpt); +extern int alx_setup_all_ring_resources(struct alx_adapter *adpt); +extern void alx_init_def_rss_idt(struct alx_adapter *adpt); +extern int alx_alloc_rxring_buf(struct alx_adapter *adpt, + struct alx_rx_queue *rxq); +extern void alx_init_intr(struct alx_adapter *adpt); +extern void alx_disable_advanced_intr(struct alx_adapter *adpt); +extern void alx_reinit(struct alx_adapter *adpt, bool in_task); +extern void alx_set_ethtool_ops(struct net_device *dev); +extern char alx_drv_name[]; +extern char alx_drv_version[]; + +#endif diff -Nurp linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_hw.c linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_hw.c --- linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_hw.c 1970-01-01 02:00:00.000000000 +0200 +++ linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_hw.c 2013-05-05 13:51:31.000000000 +0300 @@ -0,0 +1,1461 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include <linux/pci.h> +#include <linux/crc32.h> +#include <linux/etherdevice.h> +#include <linux/mdio.h> + +#include "alx_reg.h" +#include "alx_hw.h" + +#define ALX_REV_A(_r) ((_r) == ALX_REV_A0 || (_r) == ALX_REV_A1) + +/* get permanent mac address from */ +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 val, mac0, mac1; + u16 flag, i; + +#define INTN_LOADED 0x1 +#define EXTN_LOADED 0x2 + + flag = 0; + val = 0; + +read_mcadr: + + /* get it from register first */ + ALX_MEM_R32(hw, ALX_STAD0, &mac0); + ALX_MEM_R32(hw, ALX_STAD1, &mac1); + + /* addr should be big-endian */ + *(__be32 *)(addr + 2) = cpu_to_be32(mac0); + *(__be16 *)addr = cpu_to_be16((u16)mac1); + + if (is_valid_ether_addr(addr)) + return 0; + + if ((flag & INTN_LOADED) == 0) { + /* load from efuse ? */ + for (i = 0; i < ALX_SLD_MAX_TO; i++) { + ALX_MEM_R32(hw, ALX_SLD, &val); + if ((val & (ALX_SLD_STAT | ALX_SLD_START)) == 0) + break; + mdelay(1); + } + if (i == ALX_SLD_MAX_TO) + goto out; + ALX_MEM_W32(hw, ALX_SLD, val | ALX_SLD_START); + for (i = 0; i < ALX_SLD_MAX_TO; i++) { + mdelay(1); + ALX_MEM_R32(hw, ALX_SLD, &val); + if ((val & ALX_SLD_START) == 0) + break; + } + if (i == ALX_SLD_MAX_TO) + goto out; + flag |= INTN_LOADED; + goto read_mcadr; + } + + if ((flag & EXTN_LOADED) == 0) { + ALX_MEM_R32(hw, ALX_EFLD, &val); + if ((val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) != 0) { + /* load from eeprom/flash ? 
*/ + for (i = 0; i < ALX_SLD_MAX_TO; i++) { + ALX_MEM_R32(hw, ALX_EFLD, &val); + if ((val & (ALX_EFLD_STAT | + ALX_EFLD_START)) == 0) { + break; + } + mdelay(1); + } + if (i == ALX_SLD_MAX_TO) + goto out; + ALX_MEM_W32(hw, ALX_EFLD, val | ALX_EFLD_START); + for (i = 0; i < ALX_SLD_MAX_TO; i++) { + mdelay(1); + ALX_MEM_R32(hw, ALX_EFLD, &val); + if ((val & ALX_EFLD_START) == 0) + break; + } + if (i == ALX_SLD_MAX_TO) + goto out; + flag |= EXTN_LOADED; + goto read_mcadr; + } + } + +out: + return ALX_ERR_ALOAD; +} + +void alx_set_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 val; + + /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ + val = be32_to_cpu(*(__be32 *)(addr + 2)); + ALX_MEM_W32(hw, ALX_STAD0, val); + val = be16_to_cpu(*(__be16 *)addr); + ALX_MEM_W32(hw, ALX_STAD1, val); +} + +void alx_add_mc_addr(struct alx_hw *hw, u8 *addr) +{ + u32 crc32, bit, reg; + + crc32 = ether_crc(ETH_ALEN, addr); + + /* The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
+ */ + reg = (crc32 >> 31) & 0x1; + bit = (crc32 >> 26) & 0x1F; + + hw->mc_hash[reg] |= (0x1 << bit); +} + +void alx_enable_osc(struct alx_hw *hw) +{ + u32 val; + + /* rising edge */ + ALX_MEM_R32(hw, ALX_MISC, &val); + ALX_MEM_W32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN); + ALX_MEM_W32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); +} + +void alx_reset_osc(struct alx_hw *hw, u8 rev) +{ + u32 val, val2; + + /* clear Internal OSC settings, switching OSC by hw itself */ + ALX_MEM_R32(hw, ALX_MISC3, &val); + ALX_MEM_W32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | ALX_MISC3_25M_NOTO_INTNL); + + /* 25M clk from chipset may be unstable 1s after de-assert of + * PERST, driver need re-calibrate before enter Sleep for WoL + */ + ALX_MEM_R32(hw, ALX_MISC, &val); + if (rev >= ALX_REV_B0) { + /* restore over current protection def-val, + * this val could be reset by MAC-RST + */ + FIELD_SET32(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF); + /* a 0->1 change will update the internal val of osc */ + val &= ~ALX_MISC_INTNLOSC_OPEN; + ALX_MEM_W32(hw, ALX_MISC, val); + ALX_MEM_W32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + /* hw will automatically dis OSC after cab. 
*/ + ALX_MEM_R32(hw, ALX_MSIC2, &val2); + val2 &= ~ALX_MSIC2_CALB_START; + ALX_MEM_W32(hw, ALX_MSIC2, val2); + ALX_MEM_W32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START); + } else { + val &= ~ALX_MISC_INTNLOSC_OPEN; + /* disable isoloate for A0 */ + if (ALX_REV_A(rev)) + val &= ~ALX_MISC_ISO_EN; + + ALX_MEM_W32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + ALX_MEM_W32(hw, ALX_MISC, val); + } + + udelay(20); +} + +int alx_reset_mac(struct alx_hw *hw) +{ + u32 val, pmctrl; + int i, ret; + u8 rev; + bool a_cr; + + pmctrl = 0; + rev = (u8)ALX_REVID(hw); + a_cr = ALX_REV_A(rev) && ALX_WITH_CR(hw); + + /* disable all interrupts, RXQ/TXQ */ + ALX_MEM_W32(hw, ALX_MSIX_MASK, 0xFFFFFFFF); + ALX_MEM_W32(hw, ALX_IMR, 0); + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + + ret = alx_stop_mac(hw); + if (ret) + return ret; + + /* mac reset workaroud */ + ALX_MEM_W32(hw, ALX_RFD_PIDX, 1); + + /* dis l0s/l1 before mac reset */ + if (a_cr) { + ALX_MEM_R32(hw, ALX_PMCTRL, &pmctrl); + if ((pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) != 0) { + ALX_MEM_W32(hw, ALX_PMCTRL, + pmctrl & ~(ALX_PMCTRL_L1_EN | + ALX_PMCTRL_L0S_EN)); + } + } + + /* reset whole mac safely */ + ALX_MEM_R32(hw, ALX_MASTER, &val); + ALX_MEM_W32(hw, ALX_MASTER, + val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS); + + /* make sure it's real idle */ + udelay(10); + for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { + ALX_MEM_R32(hw, ALX_RFD_PIDX, &val); + if (val == 0) + break; + udelay(10); + } + for (; i < ALX_DMA_MAC_RST_TO; i++) { + ALX_MEM_R32(hw, ALX_MASTER, &val); + if ((val & ALX_MASTER_DMA_MAC_RST) == 0) + break; + udelay(10); + } + if (i == ALX_DMA_MAC_RST_TO) + return ALX_ERR_RSTMAC; + udelay(10); + + if (a_cr) { + /* set ALX_MASTER_PCLKSEL_SRDS (affect by soft-rst, PERST) */ + ALX_MEM_W32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS); + /* resoter l0s / l1 */ + if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) + ALX_MEM_W32(hw, ALX_PMCTRL, pmctrl); + } + + alx_reset_osc(hw, rev); + /* clear Internal OSC settings, 
switching OSC by hw itself, + * disable isoloate for A version + */ + ALX_MEM_R32(hw, ALX_MISC3, &val); + ALX_MEM_W32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | ALX_MISC3_25M_NOTO_INTNL); + ALX_MEM_R32(hw, ALX_MISC, &val); + val &= ~ALX_MISC_INTNLOSC_OPEN; + if (ALX_REV_A(rev)) + val &= ~ALX_MISC_ISO_EN; + ALX_MEM_W32(hw, ALX_MISC, val); + udelay(20); + + /* driver control speed/duplex, hash-alg */ + ALX_MEM_W32(hw, ALX_MAC_CTRL, hw->rx_ctrl); + + /* clk sw */ + ALX_MEM_R32(hw, ALX_SERDES, &val); + ALX_MEM_W32(hw, ALX_SERDES, + val | ALX_SERDES_MACCLK_SLWDWN | ALX_SERDES_PHYCLK_SLWDWN); + + /* mac reset cause MDIO ctrl restore non-polling status */ + if (hw->is_fpga) + __alx_start_phy_polling(hw, ALX_MDIO_CLK_SEL_25MD128); + + + return ret; +} + +/* alx_reset_phy + * completely reset phy, all settings/workaround will be re-configureed + * hib_en: enable/disable hibernation on PHY + */ +void alx_reset_phy(struct alx_hw *hw, bool hib_en) +{ + int i; + u32 val; + u16 phy_val; + + /* (DSP)reset PHY core */ + ALX_MEM_R32(hw, ALX_PHY_CTRL, &val); + val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ | + ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN | + ALX_PHY_CTRL_CLS); + val |= ALX_PHY_CTRL_RST_ANALOG; + + if (hib_en) + val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN); + else + val &= ~(ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN); + ALX_MEM_W32(hw, ALX_PHY_CTRL, val); + udelay(10); + ALX_MEM_W32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT); + + /* delay 800us */ + for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++) + udelay(10); + + if (hw->is_fpga) + goto set_imr; + + /* phy power saving & hib */ + if (hib_en) { + alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL, + ALX_SYSMODCTRL_IECHOADJ_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS, + ALX_VDRVBIAS_DEF); + } else { + alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, + ALX_LEGCYPS_DEF & ~ALX_LEGCYPS_EN); + alx_write_phy_dbg(hw, 
ALX_MIIDBG_HIBNEG, ALX_HIBNEG_NOHIB); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG, ALX_GREENCFG_DEF); + } + + /* EEE advertisement */ + if (ALX_CAP(hw, AZ)) { + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_LOCAL_EEEADV, + ALX_CAP(hw, GIGA) ? + ALX_LOCAL_EEEADV_1000BT | ALX_LOCAL_EEEADV_100BT : + ALX_LOCAL_EEEADV_100BT); + /* half amplify */ + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_DEF); + } else { + ALX_MEM_R32(hw, ALX_LPI_CTRL, &val); + ALX_MEM_W32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_LOCAL_EEEADV, 0); + } + + /* phy power saving */ + alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF); + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN); + /* rtl8139c, 120m issue */ + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78, + ALX_MIIEXT_NLP78_120M_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10, + ALX_MIIEXT_S3DIG10_DEF); + + if (hw->lnk_patch) { + /* Turn off half amplitude */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT); + /* Turn off Green feature */ + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val | ALX_GREENCFG2_BP_GREEN); + /* Turn off half Bias */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS); + } + +set_imr: + /* set phy interrupt mask */ + alx_write_phy_reg(hw, ALX_MII_IER, + ALX_IER_LINK_UP | ALX_IER_LINK_DOWN); +} + +#define 
ALX_PCI_CMD (\ + PCI_COMMAND_MASTER |\ + PCI_COMMAND_MEMORY |\ + PCI_COMMAND_IO) +/* + * alx_reset_pcie + * reset pcie relative registers (pci command, clk, aspm...) + */ +void alx_reset_pcie(struct alx_hw *hw) +{ + u32 val; + u16 val16; + u8 rev = (u8)ALX_REVID(hw); + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ + ALX_CFG_R16(hw, PCI_COMMAND, &val16); + if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) { + val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE; + ALX_CFG_W16(hw, PCI_COMMAND, val16); + } + + /* clear WoL setting/status */ + ALX_MEM_R32(hw, ALX_WOL0, &val); + ALX_MEM_W32(hw, ALX_WOL0, 0); + + /* deflt val of PDLL D3PLLOFF */ + ALX_MEM_R32(hw, ALX_PDLL_TRNS1, &val); + ALX_MEM_W32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN); + + /* mask some pcie error bits */ + ALX_MEM_R32(hw, ALX_UE_SVRT, &val); + val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR); + ALX_MEM_W32(hw, ALX_UE_SVRT, val); + + /* wol 25M & pclk */ + ALX_MEM_R32(hw, ALX_MASTER, &val); + if (ALX_REV_A(rev) && ALX_WITH_CR(hw)) { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) == 0) { + ALX_MEM_W32(hw, ALX_MASTER, + val | ALX_MASTER_PCLKSEL_SRDS | + ALX_MASTER_WAKEN_25M); + } + } else { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) != 0) { + ALX_MEM_W32(hw, ALX_MASTER, + (val & ~ALX_MASTER_PCLKSEL_SRDS) | + ALX_MASTER_WAKEN_25M); + } + } + + /* ASPM setting */ + alx_enable_aspm(hw, ALX_CAP(hw, L0S), ALX_CAP(hw, L1)); + + udelay(10); +} + +/* alx_stop_mac + * stop the mac, transmit & receive modules + * return : 0 if ok, none-0 if busy + */ +int alx_stop_mac(struct alx_hw *hw) +{ + u32 rxq, txq, val; + u16 i; + + ALX_MEM_R32(hw, ALX_RXQ0, &rxq); + ALX_MEM_W32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN); + ALX_MEM_R32(hw, ALX_TXQ0, &txq); + ALX_MEM_W32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); + + udelay(40); + + hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); + ALX_MEM_W32(hw, 
ALX_MAC_CTRL, hw->rx_ctrl);

	/* ALX_MAC_STS_IDLE stays set while the MAC/DMA state machine is
	 * still busy; success is when it clears before the timeout
	 */
	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		ALX_MEM_R32(hw, ALX_MAC_STS, &val);
		if (!(val & ALX_MAC_STS_IDLE))
			break;
		udelay(10);
	}

	/* non-zero means the engine never went idle */
	return (ALX_DMA_MAC_RST_TO == i) ? ALX_ERR_RSTMAC : 0;
}

/* alx_start_mac
 * enable rx/tx MAC module
 * speed/duplex for the MAC are taken from hw->link_speed/link_duplex,
 * which must reflect the resolved PHY link before this is called
 */
void alx_start_mac(struct alx_hw *hw)
{
	u32 mac, txq, rxq;

	/* re-enable the rx/tx queue engines first */
	ALX_MEM_R32(hw, ALX_RXQ0, &rxq);
	ALX_MEM_W32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
	ALX_MEM_R32(hw, ALX_TXQ0, &txq);
	ALX_MEM_W32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);

	/* then the MAC itself; hw->rx_ctrl caches the MAC_CTRL register */
	mac = hw->rx_ctrl;
	if (hw->link_duplex == FULL_DUPLEX)
		mac |= ALX_MAC_CTRL_FULLD;
	else
		mac &= ~ALX_MAC_CTRL_FULLD;
	FIELD_SET32(mac, ALX_MAC_CTRL_SPEED, hw->link_speed == SPEED_1000 ?
			ALX_MAC_CTRL_SPEED_1000 : ALX_MAC_CTRL_SPEED_10_100);
	mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
	hw->rx_ctrl = mac;
	ALX_MEM_W32(hw, ALX_MAC_CTRL, mac);
}

/* set flow control on MAC side
 * fc: bitmask of ALX_FC_RX / ALX_FC_TX; updates the cached MAC_CTRL
 * value (hw->rx_ctrl) and writes it back
 */
void alx_cfg_mac_fc(struct alx_hw *hw, u8 fc)
{
	if (fc & ALX_FC_RX)
		hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;

	if (fc & ALX_FC_TX)
		hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;

	ALX_MEM_W32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

/* enable/disable aspm support
 * l0s_en / l1_en: whether to advertise L0s / L1 entry in PMCTRL.
 * The function first forces the register to a known baseline
 * (timers set, all ASPM enables cleared), then ORs in the requested
 * enables, so the result is independent of the previous state.
 */
void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
{
	u32 pmctrl;
	u8 rev = (u8)ALX_REVID(hw);

	ALX_MEM_R32(hw, ALX_PMCTRL, &pmctrl);

	FIELD_SET32(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
		    ALX_PMCTRL_LCKDET_TIMER_DEF);
	pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
		  ALX_PMCTRL_L1_CLKSW_EN |
		  ALX_PMCTRL_L1_SRDSRX_PWD ;
	FIELD_SET32(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
	FIELD_SET32(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
	/* clear every ASPM/serdes enable; re-added selectively below */
	pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
		    ALX_PMCTRL_L1_SRDSPLL_EN |
		    ALX_PMCTRL_L1_BUFSRX_EN |
		    ALX_PMCTRL_SADLY_EN |
		    ALX_PMCTRL_HOTRST_WTEN|
		    ALX_PMCTRL_L0S_EN |
		    ALX_PMCTRL_L1_EN |
		    ALX_PMCTRL_ASPM_FCEN |
		    ALX_PMCTRL_TXL1_AFTER_L0S |
ALX_PMCTRL_RXL1_AFTER_L0S + ); + if (ALX_REV_A(rev) && ALX_WITH_CR(hw)) + pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN; + + if (l0s_en) + pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN); + if (l1_en) + pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN); + + ALX_MEM_W32(hw, ALX_PMCTRL, pmctrl); +} + + +/* translate ethtool adv /speed/duplex settting to hw specific value */ +u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg) +{ + u32 cfg = 0; + + if (ethadv_cfg & ADVERTISED_Autoneg) { + cfg |= ALX_DRV_PHY_AUTO; + if (ethadv_cfg & ADVERTISED_10baseT_Half) + cfg |= ALX_DRV_PHY_10; + if (ethadv_cfg & ADVERTISED_10baseT_Full) + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_100baseT_Half) + cfg |= ALX_DRV_PHY_100; + if (ethadv_cfg & ADVERTISED_100baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_1000baseT_Half) + cfg |= ALX_DRV_PHY_1000; + if (ethadv_cfg & ADVERTISED_1000baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_Pause) + cfg |= ADVERTISE_PAUSE_CAP; + if (ethadv_cfg & ADVERTISED_Asym_Pause) + cfg |= ADVERTISE_PAUSE_ASYM; + if (ALX_CAP(hw, AZ)) + cfg |= ALX_DRV_PHY_EEE; + } else { + switch (ethadv_cfg) { + case ADVERTISED_10baseT_Half: + cfg |= ALX_DRV_PHY_10; + break; + case ADVERTISED_100baseT_Half: + cfg |= ALX_DRV_PHY_100; + break; + case ADVERTISED_10baseT_Full: + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + break; + case ADVERTISED_100baseT_Full: + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + break; + } + } + + return cfg; +} + +/* initialize phy for speed / flow control + * ethadv: + * format from ethtool, we use it for both autoneg and force mode + */ +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl) +{ + u16 adv, giga, cr; + u32 val; + int err = 0; + + /* clear flag */ + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0); + ALX_MEM_R32(hw, ALX_DRV, &val); + FIELD_SET32(val, ALX_DRV_PHY, 0); + + if (ethadv & 
ADVERTISED_Autoneg) { + adv = ADVERTISE_CSMA; + adv |= ethtool_adv_to_mii_adv_t(ethadv); + + if (flowctrl & ALX_FC_ANEG) { + if (flowctrl & ALX_FC_RX) { + adv |= ADVERTISED_Pause; + if (!(flowctrl & ALX_FC_TX)) + adv |= ADVERTISED_Asym_Pause; + } else if (flowctrl & ALX_FC_TX) + adv |= ADVERTISED_Asym_Pause; + } + giga = 0; + if (ALX_CAP(hw, GIGA)) + giga = ethtool_adv_to_mii_ctrl1000_t(ethadv); + + cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + + if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) || + alx_write_phy_reg(hw, MII_CTRL1000, giga) || + alx_write_phy_reg(hw, MII_BMCR, cr)) + err = ALX_ERR_MIIBUSY; + } else { + cr = BMCR_RESET; + if (ethadv == ADVERTISED_100baseT_Half || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_SPEED100; + if (ethadv == ADVERTISED_10baseT_Full || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_FULLDPLX; + + err = alx_write_phy_reg(hw, MII_BMCR, cr); + } + + if (!err) { + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED); + /* save config to HW */ + val |= ethadv_to_hw_cfg(hw, ethadv); + } + + ALX_MEM_W32(hw, ALX_DRV, val); + + return err; +} + + +/* do post setting on phy if link up/down event occur */ +void alx_post_phy_link(struct alx_hw *hw, u16 speed, bool az_en) +{ + u16 phy_val, len, agc; + u8 revid = (u8)ALX_REVID(hw); + bool adj_th; + + if (revid != ALX_REV_B0 && + revid != ALX_REV_A1 && + revid != ALX_REV_A0) { + return; + } + adj_th = (revid == ALX_REV_B0) ? 
true : false; + + /* 1000BT/AZ, wrong cable length */ + if (speed != SPEED_0) { + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, + &phy_val); + len = FIELD_GETX(phy_val, ALX_CLDCTRL6_CAB_LEN); + alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); + agc = FIELD_GETX(phy_val, ALX_AGC_2_VGA); + + if ((speed == SPEED_1000 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || + (0 == len && agc > ALX_AGC_LONG1G_LIMT))) || + (speed == SPEED_100 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || + (0 == len && agc > ALX_AGC_LONG100M_LIMT)))) { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_LONG); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val | ALX_AFE_10BT_100M_TH); + } else { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_DEF); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_AFE, &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + } + + /* threashold adjust */ + if (adj_th && hw->lnk_patch) { + if (speed == SPEED_100) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + ALX_MSE16DB_UP); + } else if (speed == SPEED_1000) { + /* + * Giga link threshold, raise the tolerance of + * noise 50% + */ + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + &phy_val); + FIELD_SETS(phy_val, ALX_MSE20DB_TH, + ALX_MSE20DB_TH_HI); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + phy_val); + } + } + /* phy link-down in 1000BT/AZ mode */ + if (az_en && revid == ALX_REV_B0 && speed == SPEED_1000) { + alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, + ALX_SRDSYSMOD_DEF & ~ALX_SRDSYSMOD_DEEMP_EN); + } + } else { + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + + if (adj_th && hw->lnk_patch) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + ALX_MSE16DB_DOWN); + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val); + 
FIELD_SETS(phy_val, ALX_MSE20DB_TH, ALX_MSE20DB_TH_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val); + } + if (az_en && revid == ALX_REV_B0) { + alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, + ALX_SRDSYSMOD_DEF); + } + } +} + + +/* do power saving setting befor enter suspend mode + * NOTE: + * 1. phy link must be established before calling this function + * 2. wol option (pattern,magic,link,etc.) is configed before call it. + */ +int alx_pre_suspend(struct alx_hw *hw, u16 speed) +{ + u32 master, mac, phy, val; + int err = 0; + + ALX_MEM_R32(hw, ALX_MASTER, &master); + master &= ~ALX_MASTER_PCLKSEL_SRDS; + mac = hw->rx_ctrl; + /* 10/100 half */ + FIELD_SET32(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100); + mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); + + ALX_MEM_R32(hw, ALX_PHY_CTRL, &phy); + phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS); + phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE | + ALX_PHY_CTRL_HIB_EN; + + /* without any activity */ + if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) { + err = alx_write_phy_reg(hw, ALX_MII_IER, 0); + phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN; + goto config_reg; + } + + if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS)) + mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN; + if (hw->sleep_ctrl & ALX_SLEEP_CIFS) + mac |= ALX_MAC_CTRL_TX_EN; + if (speed % 10 == FULL_DUPLEX) + mac |= ALX_MAC_CTRL_FULLD; + if (speed >= SPEED_1000) + FIELD_SET32(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_1000); + phy |= ALX_PHY_CTRL_DSPRST_OUT; + err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_S3DIG10, ALX_MIIEXT_S3DIG10_SL); +config_reg: + + if (!err) { + alx_enable_osc(hw); + hw->rx_ctrl = mac; + ALX_MEM_W32(hw, ALX_MASTER, master); + ALX_MEM_W32(hw, ALX_MAC_CTRL, mac); + ALX_MEM_W32(hw, ALX_PHY_CTRL, phy); + + /* set val of PDLL D3PLLOFF */ + ALX_MEM_R32(hw, ALX_PDLL_TRNS1, &val); + val |= ALX_PDLL_TRNS1_D3PLLOFF_EN; + ALX_MEM_W32(hw, ALX_PDLL_TRNS1, val); + } + + 
return err;
}

/* wait mdio module to be idle
 * polls ALX_MDIO until the BUSY bit clears; returns true if it went
 * idle within ALX_MDIO_MAX_AC_TO polls, false on timeout
 */
bool __alx_wait_mdio_idle(struct alx_hw *hw)
{
	u32 val;
	int i;

	for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
		ALX_MEM_R32(hw, ALX_MDIO, &val);
		if (!(val & ALX_MDIO_BUSY))
			break;
		udelay(10);
	}
	return i != ALX_MDIO_MAX_AC_TO;
}

/* stop the MDIO auto-polling engine (FPGA platforms only) so that a
 * manual MDIO transaction can be issued; no-op on real silicon
 */
void __alx_stop_phy_polling(struct alx_hw *hw)
{
	if (!hw->is_fpga)
		return;

	ALX_MEM_W32(hw, ALX_MDIO, 0);
	__alx_wait_mdio_idle(hw);
}

/* re-arm the MDIO auto-polling engine (FPGA platforms only).
 * clk_sel: MDIO clock divider selection.
 * A read of PHY register 1 is kicked off first; once idle, the same
 * command word is rewritten with AUTO_POLLING set and START cleared.
 */
void __alx_start_phy_polling(struct alx_hw *hw, u16 clk_sel)
{
	u32 val;

	if (!hw->is_fpga)
		return;

	val = ALX_MDIO_SPRES_PRMBL |
	      FIELDX(ALX_MDIO_CLK_SEL, clk_sel) |
	      FIELDX(ALX_MDIO_REG, 1) |
	      ALX_MDIO_START |
	      ALX_MDIO_OP_READ;
	ALX_MEM_W32(hw, ALX_MDIO, val);
	__alx_wait_mdio_idle(hw);
	val |= ALX_MDIO_AUTO_POLLING;
	val &= ~ALX_MDIO_START;
	ALX_MEM_W32(hw, ALX_MDIO, val);
	udelay(30);
}

/* __alx_read_phy_core
 * core function to read register in PHY via MDIO interface
 * ext: extension register (see IEEE 802.3)
 * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
 * reg: register to read
 * NOTE: unlocked variant — callers in this file wrap it with
 * hw->mdio_lock (see alx_read_phy_reg and friends)
 */
int __alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			u16 reg, u16 *phy_data)
{
	u32 val, clk_sel;
	int err;

	__alx_stop_phy_polling(hw);

	*phy_data = 0;

	/* use slow clock when it's in hibernation status */
	clk_sel = !hw->link_up ?
+ ALX_MDIO_CLK_SEL_25MD128 : ALX_MDIO_CLK_SEL_25MD4; + + if (ext) { + val = FIELDX(ALX_MDIO_EXTN_DEVAD, dev) | + FIELDX(ALX_MDIO_EXTN_REG, reg); + ALX_MEM_W32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | + FIELDX(ALX_MDIO_CLK_SEL, clk_sel) | + ALX_MDIO_START | + ALX_MDIO_MODE_EXT | + ALX_MDIO_OP_READ; + } else { + val = ALX_MDIO_SPRES_PRMBL | + FIELDX(ALX_MDIO_CLK_SEL, clk_sel) | + FIELDX(ALX_MDIO_REG, reg) | + ALX_MDIO_START | + ALX_MDIO_OP_READ; + } + ALX_MEM_W32(hw, ALX_MDIO, val); + + if (unlikely(!__alx_wait_mdio_idle(hw))) + err = ALX_ERR_MIIBUSY; + else { + ALX_MEM_R32(hw, ALX_MDIO, &val); + *phy_data = (u16)FIELD_GETX(val, ALX_MDIO_DATA); + err = 0; + } + + __alx_start_phy_polling(hw, clk_sel); + + return err; +} + +/* __alx_write_phy_core + * core function to write to register in PHY via MDIO interface + * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: register to write + */ +int __alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val, clk_sel; + int err = 0; + + __alx_stop_phy_polling(hw); + + /* use slow clock when it's in hibernation status */ + clk_sel = !hw->link_up ? 
+ ALX_MDIO_CLK_SEL_25MD128 : ALX_MDIO_CLK_SEL_25MD4; + + if (ext) { + val = FIELDX(ALX_MDIO_EXTN_DEVAD, dev) | + FIELDX(ALX_MDIO_EXTN_REG, reg); + ALX_MEM_W32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | + FIELDX(ALX_MDIO_CLK_SEL, clk_sel) | + FIELDX(ALX_MDIO_DATA, phy_data) | + ALX_MDIO_START | + ALX_MDIO_MODE_EXT; + } else { + val = ALX_MDIO_SPRES_PRMBL | + FIELDX(ALX_MDIO_CLK_SEL, clk_sel) | + FIELDX(ALX_MDIO_REG, reg) | + FIELDX(ALX_MDIO_DATA, phy_data) | + ALX_MDIO_START; + } + ALX_MEM_W32(hw, ALX_MDIO, val); + + if (unlikely(!__alx_wait_mdio_idle(hw))) + err = ALX_ERR_MIIBUSY; + + __alx_start_phy_polling(hw, clk_sel); + + return err; +} + +/* read from PHY normal register */ +int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) +{ + return __alx_read_phy_core(hw, false, 0, reg, phy_data); +} + +/* write to PHY normal register */ +int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + return __alx_write_phy_core(hw, false, 0, reg, phy_data); +} + +/* read from PHY extension register */ +int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + return __alx_read_phy_core(hw, true, dev, reg, pdata); +} + +/* write to PHY extension register */ +int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + return __alx_write_phy_core(hw, true, dev, reg, data); +} + +/* read from PHY debug port */ +int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (unlikely(err)) + return err; + else + err = __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata); + + return err; +} + +/* write to PHY debug port */ +int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (unlikely(err)) + return err; + else + err = __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data); + + return err; +} + +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) 
+{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_ext(hw, dev, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_ext(hw, dev, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_dbg(hw, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_dbg(hw, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +u16 alx_get_phy_config(struct alx_hw *hw) +{ + u32 val; + u16 phy_val; + + ALX_MEM_R32(hw, ALX_PHY_CTRL, &val); + /* phy in rst */ + if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0) + return ALX_DRV_PHY_UNKNOWN; + + ALX_MEM_R32(hw, ALX_DRV, &val); + val = FIELD_GETX(val, ALX_DRV_PHY); + if (ALX_DRV_PHY_UNKNOWN == val) + return ALX_DRV_PHY_UNKNOWN; + + alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val); + if (ALX_PHY_INITED == phy_val) + return (u16) val; + + return ALX_DRV_PHY_UNKNOWN; +} + +bool alx_phy_configed(struct alx_hw *hw) +{ + u32 cfg, hw_cfg; + + cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg); + cfg = FIELD_GETX(cfg, ALX_DRV_PHY); + hw_cfg = alx_get_phy_config(hw); + if (hw_cfg == ALX_DRV_PHY_UNKNOWN) + return false; + + return cfg == hw_cfg; +} + +int alx_get_phy_link(struct alx_hw *hw, bool *link_up, 
u16 *speed) +{ + struct pci_dev *pdev = hw->pdev; + u16 bmsr, giga; + int err; + + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + if (unlikely(err)) + goto out; + + if (!(bmsr & BMSR_LSTATUS)) { + *link_up = false; + goto out; + } + + *link_up = true; + + /* speed/duplex result is saved in PHY Specific Status Register */ + err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga); + if (unlikely(err)) + goto out; + + if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED)) + goto wrong_spd_out; + + switch (giga & ALX_GIGA_PSSR_SPEED) { + case ALX_GIGA_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case ALX_GIGA_PSSR_100MBS: + *speed = SPEED_100; + break; + case ALX_GIGA_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + goto wrong_spd_out; + } + *speed += (giga & ALX_GIGA_PSSR_DPLX) ? FULL_DUPLEX : HALF_DUPLEX; + goto out; + +wrong_spd_out: + dev_err(&pdev->dev, "PHY SPD/DPLX unresolved :%x\n", giga); + err = -EINVAL; +out: + return err; +} + +int alx_clear_phy_intr(struct alx_hw *hw) +{ + u16 isr; + + /* clear interrupt status by read it */ + return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); +} + +int alx_config_wol(struct alx_hw *hw) +{ + u32 wol; + int err = 0; + + wol = 0; + /* turn on magic packet event */ + if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) { + wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN; + /* magic packet maybe Broadcast&multicast&Unicast frame */ + /* mac |= MAC_CTRL_BC_EN; */ + } + + /* turn on link up event */ + if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) { + wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK; + /* only link up can wake up */ + err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP); + } + ALX_MEM_W32(hw, ALX_WOL0, wol); + + return err; +} + +void alx_configure_rss(struct alx_hw *hw, bool en) +{ + u32 ctrl; + int i; + + ALX_MEM_R32(hw, ALX_RXQ0, &ctrl); + + if (en) { + for (i = 0; i < sizeof(hw->rss_key); i++) { + /* rss key should be saved in chip with + * reversed order. 
+ */ + int j = sizeof(hw->rss_key) - i - 1; + + ALX_MEM_W8(hw, ALX_RSS_KEY0 + j, hw->rss_key[i]); + } + + for (i = 0; i < ARRAY_SIZE(hw->rss_idt); i++) + ALX_MEM_W32(hw, ALX_RSS_IDT_TBL0 + i * 4, + hw->rss_idt[i]); + + FIELD_SET32(ctrl, ALX_RXQ0_RSS_HSTYP, hw->rss_hash_type); + FIELD_SET32(ctrl, ALX_RXQ0_RSS_MODE, ALX_RXQ0_RSS_MODE_MQMI); + FIELD_SET32(ctrl, ALX_RXQ0_IDT_TBL_SIZE, hw->rss_idt_size); + ctrl |= ALX_RXQ0_RSS_HASH_EN; + } else { + ctrl &= ~ALX_RXQ0_RSS_HASH_EN; + } + + ALX_MEM_W32(hw, ALX_RXQ0, ctrl); +} + +void alx_configure_basic(struct alx_hw *hw) +{ + u32 val, raw_mtu, max_payload; + u16 val16; + u8 chip_rev = ALX_REVID(hw); + + /* mac address */ + alx_set_macaddr(hw, hw->mac_addr); + + /* clk gating */ + ALX_MEM_W32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL_A0); + + /* idle timeout to switch clk_125M */ + if (chip_rev >= ALX_REV_B0) { + ALX_MEM_W32(hw, ALX_IDLE_DECISN_TIMER, + ALX_IDLE_DECISN_TIMER_DEF); + } + + /* stats refresh timeout */ + ALX_MEM_W32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL); + + /* intr moduration */ + ALX_MEM_R32(hw, ALX_MASTER, &val); + val = val | ALX_MASTER_IRQMOD2_EN | + ALX_MASTER_IRQMOD1_EN | + ALX_MASTER_SYSALVTIMER_EN; + ALX_MEM_W32(hw, ALX_MASTER, val); + ALX_MEM_W32(hw, ALX_IRQ_MODU_TIMER, + FIELDX(ALX_IRQ_MODU_TIMER1, hw->imt >> 1)); + /* intr re-trig timeout */ + ALX_MEM_W32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO); + /* tpd threshold to trig int */ + ALX_MEM_W32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); + ALX_MEM_W32(hw, ALX_TINT_TIMER, hw->imt); + + /* mtu, 8:fcs+vlan */ + raw_mtu = hw->mtu + ETH_HLEN; + ALX_MEM_W32(hw, ALX_MTU, raw_mtu + 8); + if (raw_mtu > ALX_MTU_JUMBO_TH) + hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; + + /* txq */ + if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) + val = (raw_mtu + 8 + 7) >> 3; + else + val = ALX_TXQ1_JUMBO_TSO_TH >> 3; + ALX_MEM_W32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); + max_payload = alx_get_readrq(hw) >> 8; + /* + * if BIOS had changed the default dma read max length, + * restore 
it to default value + */ + if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN) + alx_set_readrq(hw, 128 << ALX_DEV_CTRL_MAXRRS_MIN); + + val = FIELDX(ALX_TXQ0_TPD_BURSTPREF, ALX_TXQ_TPD_BURSTPREF_DEF) | + ALX_TXQ0_MODE_ENHANCE | + ALX_TXQ0_LSO_8023_EN | + ALX_TXQ0_SUPT_IPOPT | + FIELDX(ALX_TXQ0_TXF_BURST_PREF, ALX_TXQ_TXF_BURST_PREF_DEF); + ALX_MEM_W32(hw, ALX_TXQ0, val); + val = FIELDX(ALX_HQTPD_Q1_NUMPREF, ALX_TXQ_TPD_BURSTPREF_DEF) | + FIELDX(ALX_HQTPD_Q2_NUMPREF, ALX_TXQ_TPD_BURSTPREF_DEF) | + FIELDX(ALX_HQTPD_Q3_NUMPREF, ALX_TXQ_TPD_BURSTPREF_DEF) | + ALX_HQTPD_BURST_EN; + ALX_MEM_W32(hw, ALX_HQTPD, val); + + /* rxq, flow control */ + ALX_MEM_R32(hw, ALX_SRAM5, &val); + val = FIELD_GETX(val, ALX_SRAM_RXF_LEN) << 3; + if (val > ALX_SRAM_RXF_LEN_8K) { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3; + } else { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_MTU_STD_ALGN) >> 3; + } + ALX_MEM_W32(hw, ALX_RXQ2, + FIELDX(ALX_RXQ2_RXF_XOFF_THRESH, val16) | + FIELDX(ALX_RXQ2_RXF_XON_THRESH, val)); + val = FIELDX(ALX_RXQ0_NUM_RFD_PREF, ALX_RXQ0_NUM_RFD_PREF_DEF) | + FIELDX(ALX_RXQ0_RSS_MODE, ALX_RXQ0_RSS_MODE_DIS) | + FIELDX(ALX_RXQ0_IDT_TBL_SIZE, ALX_RXQ0_IDT_TBL_SIZE_DEF) | + ALX_RXQ0_RSS_HSTYP_ALL | + ALX_RXQ0_RSS_HASH_EN | + ALX_RXQ0_IPV6_PARSE_EN; + if (ALX_CAP(hw, GIGA)) { + FIELD_SET32(val, ALX_RXQ0_ASPM_THRESH, + ALX_RXQ0_ASPM_THRESH_100M); + } + ALX_MEM_W32(hw, ALX_RXQ0, val); + + /* DMA */ + ALX_MEM_R32(hw, ALX_DMA, &val); + val = FIELDX(ALX_DMA_RORDER_MODE, ALX_DMA_RORDER_MODE_OUT) | + ALX_DMA_RREQ_PRI_DATA | + FIELDX(ALX_DMA_RREQ_BLEN, max_payload) | + FIELDX(ALX_DMA_WDLY_CNT, ALX_DMA_WDLY_CNT_DEF) | + FIELDX(ALX_DMA_RDLY_CNT, ALX_DMA_RDLY_CNT_DEF) | + FIELDX(ALX_DMA_RCHNL_SEL, hw->dma_chnl - 1); + ALX_MEM_W32(hw, ALX_DMA, val); + + /* multi-tx-q weight */ + if (ALX_CAP(hw, MTQ)) { + val = FIELDX(ALX_WRR_PRI, hw->wrr_ctrl) | + FIELDX(ALX_WRR_PRI0, hw->wrr[0]) | + FIELDX(ALX_WRR_PRI1, hw->wrr[1]) | + FIELDX(ALX_WRR_PRI2, 
hw->wrr[2]) | + FIELDX(ALX_WRR_PRI3, hw->wrr[3]); + ALX_MEM_W32(hw, ALX_WRR, val); + } +} + +void alx_mask_msix(struct alx_hw *hw, int index, bool mask) +{ + u32 reg, val; + + reg = ALX_MSIX_ENTRY_BASE + index * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; + + val = mask ? PCI_MSIX_ENTRY_CTRL_MASKBIT : 0; + + ALX_MEM_W32(hw, reg, val); + ALX_MEM_FLUSH(hw); +} + +int alx_select_powersaving_speed(struct alx_hw *hw, u16 *speed) +{ + int i, err; + u16 spd, lpa; + bool linkup; + + err = alx_get_phy_link(hw, &linkup, &spd); + if (err) + goto out; + + if (!linkup) { + *speed = SPEED_0; + goto out; + } + + err = alx_read_phy_reg(hw, MII_LPA, &lpa); + if (err) + goto out; + + if (!(lpa & LPA_LPACK)) { + *speed = spd; + goto out; + } + if (lpa & LPA_10FULL) + *speed = SPEED_10 + FULL_DUPLEX; + else if (lpa & LPA_10HALF) + *speed = SPEED_10 + HALF_DUPLEX; + else if (lpa & LPA_100FULL) + *speed = SPEED_100 + FULL_DUPLEX; + else + *speed = SPEED_100 + HALF_DUPLEX; + + if (*speed != spd) { + err = alx_write_phy_reg(hw, ALX_MII_IER, 0); + if (err) + goto out; + err = alx_setup_speed_duplex(hw, + ALX_SPEED_TO_ETHADV(*speed) | ADVERTISED_Autoneg, + ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX); + if (err) + goto out; + + /* wait for linkup */ + for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { + u16 speed2; + bool link_on; + + msleep(100); + err = alx_get_phy_link(hw, &link_on, &speed2); + if (err) + goto out; + if (link_on) + break; + } + if (i == ALX_MAX_SETUP_LNK_CYCLE) { + err = ALX_LINK_TIMEOUT; + goto out; + } + } + +out: + return err; +} + +void __alx_update_hw_stats(struct alx_hw *hw) +{ + u16 reg; + u32 data; + unsigned long *p; + + /* RX stats */ + reg = ALX_RX_STATS_BIN; + p = &hw->stats.rx_ok; + while (reg <= ALX_RX_STATS_END) { + ALX_MEM_R32(hw, reg, &data); + *p++ += data; + reg += 4; + } + + /* TX stats */ + reg = ALX_TX_STATS_BIN; + p = &hw->stats.tx_ok; + while (reg <= ALX_TX_STATS_END) { + ALX_MEM_R32(hw, reg, &data); + *p++ += data; + reg += 4; + } +} + +static const 
struct alx_platform_patch plats[] = { +{0x1091, 0x00, 0x1969, 0x0091, 0x1001}, +{0}, +}; + +void alx_patch_assign(struct alx_hw *hw) +{ + int i = 0; + + while (plats[i].pci_did != 0) { + if (plats[i].pci_did == ALX_DID(hw) && + plats[i].subsystem_vid == ALX_SUB_VID(hw) && + plats[i].subsystem_did == ALX_SUB_DID(hw) && + (plats[i].pflag & ALX_PF_ANY_REV || + plats[i].pci_rev == hw->revision)) { + if (plats[i].pflag & ALX_PF_LINK) + hw->lnk_patch = true; + if (plats[i].pflag & ALX_PF_HIB) + hw->hib_patch = true; + } + i++; + } +} + +bool alx_get_phy_info(struct alx_hw *hw) +{ + u16 devs1, devs2; + + if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) || + alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1])) + return false; + + /* since we haven't PMA/PMD status2 register, we can't + * use mdio45_probe function for prtad and mmds. + * use fixed MMD3 to get mmds. + */ + if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) || + alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2)) + return false; + hw->mdio.mmds = devs1 | devs2 << 16; + + return true; +} diff -Nurp linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_hw.h linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_hw.h --- linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_hw.h 1970-01-01 02:00:00.000000000 +0200 +++ linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_hw.h 2013-05-05 13:51:31.000000000 +0300 @@ -0,0 +1,661 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ALX_HW_H_ +#define ALX_HW_H_ + +/* specific error info */ +#define ALX_ERR_SUCCESS 0x0000 +#define ALX_ERR_ALOAD 0x0001 +#define ALX_ERR_RSTMAC 0x0002 +#define ALX_ERR_PARM 0x0003 +#define ALX_ERR_MIIBUSY 0x0004 +#define ALX_LINK_TIMEOUT 0x0008 + +/* Transmit Packet Descriptor, contains 4 32-bit words. + * + * 31 16 0 + * +----------------+----------------+ + * | vlan-tag | buf length | + * +----------------+----------------+ + * | Word 1 | + * +----------------+----------------+ + * | Word 2: buf addr lo | + * +----------------+----------------+ + * | Word 3: buf addr hi | + * +----------------+----------------+ + * + * Word 2 and 3 combine to form a 64-bit buffer address + * + * Word 1 has three forms, depending on the state of bit 8/12/13: + * if bit8 =='1', the definition is just for custom checksum offload. + * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor + * for the skb is special for LSO V2, Word 2 become total skb length , + * Word 3 is meaningless. + * other condition, the definition is for general skb or ip/tcp/udp + * checksum or LSO(TSO) offload. 
+ * + * Here is the depiction: + * + * 0-+ 0-+ + * 1 | 1 | + * 2 | 2 | + * 3 | Payload offset 3 | L4 header offset + * 4 | (7:0) 4 | (7:0) + * 5 | 5 | + * 6 | 6 | + * 7-+ 7-+ + * 8 Custom csum enable = 1 8 Custom csum enable = 0 + * 9 General IPv4 checksum 9 General IPv4 checksum + * 10 General TCP checksum 10 General TCP checksum + * 11 General UDP checksum 11 General UDP checksum + * 12 Large Send Segment enable 12 Large Send Segment enable + * 13 Large Send Segment type 13 Large Send Segment type + * 14 VLAN tagged 14 VLAN tagged + * 15 Insert VLAN tag 15 Insert VLAN tag + * 16 IPv4 packet 16 IPv4 packet + * 17 Ethernet frame type 17 Ethernet frame type + * 18-+ 18-+ + * 19 | 19 | + * 20 | 20 | + * 21 | Custom csum offset 21 | + * 22 | (25:18) 22 | + * 23 | 23 | MSS (30:18) + * 24 | 24 | + * 25-+ 25 | + * 26-+ 26 | + * 27 | 27 | + * 28 | Reserved 28 | + * 29 | 29 | + * 30-+ 30-+ + * 31 End of packet 31 End of packet + */ + +struct tpd_desc { + __le32 word0; + __le32 word1; + union { + __le64 addr; + struct { + __le32 pkt_len; + __le32 resvd; + } l; + } adrl; +} __packed; + +/* tpd word 0 */ +#define TPD_BUFLEN_MASK 0xFFFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_VLTAG_MASK 0xFFFF +#define TPD_VLTAG_SHIFT 16 + +/* tpd word 1 */ +#define TPD_CXSUMSTART_MASK 0x00FF +#define TPD_CXSUMSTART_SHIFT 0 +#define TPD_L4HDROFFSET_MASK 0x00FF +#define TPD_L4HDROFFSET_SHIFT 0 +#define TPD_CXSUM_EN_MASK 0x0001 +#define TPD_CXSUM_EN_SHIFT 8 +#define TPD_IP_XSUM_MASK 0x0001 +#define TPD_IP_XSUM_SHIFT 9 +#define TPD_TCP_XSUM_MASK 0x0001 +#define TPD_TCP_XSUM_SHIFT 10 +#define TPD_UDP_XSUM_MASK 0x0001 +#define TPD_UDP_XSUm_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_V2_MASK 0x0001 +#define TPD_LSO_V2_SHIFT 13 +#define TPD_VLTAGGED_MASK 0x0001 +#define TPD_VLTAGGED_SHIFT 14 +#define TPD_INS_VLTAG_MASK 0x0001 +#define TPD_INS_VLTAG_SHIFT 15 +#define TPD_IPV4_MASK 0x0001 +#define TPD_IPV4_SHIFT 16 +#define TPD_ETHTYPE_MASK 0x0001 +#define 
TPD_ETHTYPE_SHIFT 17 +#define TPD_CXSUMOFFSET_MASK 0x00FF +#define TPD_CXSUMOFFSET_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 18 +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK) + +/* Receive Free Descriptor */ +struct rfd_desc { + __le64 addr; /* data buffer address, length is + * declared in register --- every + * buffer has the same size + */ +} __packed; + +/* Receive Return Descriptor, contains 4 32-bit words. + * + * 31 16 0 + * +----------------+----------------+ + * | Word 0 | + * +----------------+----------------+ + * | Word 1: RSS Hash value | + * +----------------+----------------+ + * | Word 2 | + * +----------------+----------------+ + * | Word 3 | + * +----------------+----------------+ + * + * Word 0 depiction & Word 2 depiction: + * + * 0--+ 0--+ + * 1 | 1 | + * 2 | 2 | + * 3 | 3 | + * 4 | 4 | + * 5 | 5 | + * 6 | 6 | + * 7 | IP payload checksum 7 | VLAN tag + * 8 | (15:0) 8 | (15:0) + * 9 | 9 | + * 10 | 10 | + * 11 | 11 | + * 12 | 12 | + * 13 | 13 | + * 14 | 14 | + * 15-+ 15-+ + * 16-+ 16-+ + * 17 | Number of RFDs 17 | + * 18 | (19:16) 18 | + * 19-+ 19 | Protocol ID + * 20-+ 20 | (23:16) + * 21 | 21 | + * 22 | 22 | + * 23 | 23-+ + * 24 | 24 | Reserved + * 25 | Start index of RFD-ring 25-+ + * 26 | (31:20) 26 | RSS Q-num (27:25) + * 27 | 27-+ + * 28 | 28-+ + * 29 | 29 | RSS Hash algorithm + * 30 | 30 | (31:28) + * 31-+ 31-+ + * + * Word 3 depiction: + * + * 0--+ + * 1 | + * 2 | + * 3 | + * 4 | + * 5 | + * 6 | + * 7 | Packet length (include FCS) + * 8 | (13:0) + * 9 | + * 10 | + * 11 | + * 12 | + * 13-+ + * 14 L4 Header checksum error + * 15 IPv4 checksum error + * 16 VLAN tagged + * 17-+ + * 18 | Protocol ID (19:17) + * 19-+ + * 20 Receive error summary + * 21 FCS(CRC) error + * 22 Frame alignment error + * 23 Truncated packet + * 24 Runt packet + * 25 Incomplete packet due to insufficient rx-desc + * 26 Broadcast packet + * 27 Multicast packet + * 28 
Ethernet type (EII or 802.3) + * 29 FIFO overflow + * 30 Length error (for 802.3, length field mismatch with actual len) + * 31 Updated, indicate to driver that this RRD is refreshed. + */ + +struct rrd_desc { + __le32 word0; + __le32 rss_hash; + __le32 word2; + __le32 word3; +} __packed; + +/* rrd word 0 */ +#define RRD_XSUM_MASK 0xFFFF +#define RRD_XSUM_SHIFT 0 +#define RRD_NOR_MASK 0x000F +#define RRD_NOR_SHIFT 16 +#define RRD_SI_MASK 0x0FFF +#define RRD_SI_SHIFT 20 + +/* rrd word 2 */ +#define RRD_VLTAG_MASK 0xFFFF +#define RRD_VLTAG_SHIFT 0 +#define RRD_PID_MASK 0x00FF +#define RRD_PID_SHIFT 16 +/* non-ip packet */ +#define RRD_PID_NONIP 0 +/* ipv4(only) */ +#define RRD_PID_IPV4 1 +/* tcp/ipv6 */ +#define RRD_PID_IPV6TCP 2 +/* tcp/ipv4 */ +#define RRD_PID_IPV4TCP 3 +/* udp/ipv6 */ +#define RRD_PID_IPV6UDP 4 +/* udp/ipv4 */ +#define RRD_PID_IPV4UDP 5 +/* ipv6(only) */ +#define RRD_PID_IPV6 6 +/* LLDP packet */ +#define RRD_PID_LLDP 7 +/* 1588 packet */ +#define RRD_PID_1588 8 +#define RRD_RSSQ_MASK 0x0007 +#define RRD_RSSQ_SHIFT 25 +#define RRD_RSSALG_MASK 0x000F +#define RRD_RSSALG_SHIFT 28 +#define RRD_RSSALG_TCPV6 0x1 +#define RRD_RSSALG_IPV6 0x2 +#define RRD_RSSALG_TCPV4 0x4 +#define RRD_RSSALG_IPV4 0x8 + +/* rrd word 3 */ +#define RRD_PKTLEN_MASK 0x3FFF +#define RRD_PKTLEN_SHIFT 0 +#define RRD_ERR_L4_MASK 0x0001 +#define RRD_ERR_L4_SHIFT 14 +#define RRD_ERR_IPV4_MASK 0x0001 +#define RRD_ERR_IPV4_SHIFT 15 +#define RRD_VLTAGGED_MASK 0x0001 +#define RRD_VLTAGGED_SHIFT 16 +#define RRD_OLD_PID_MASK 0x0007 +#define RRD_OLD_PID_SHIFT 17 +#define RRD_ERR_RES_MASK 0x0001 +#define RRD_ERR_RES_SHIFT 20 +#define RRD_ERR_FCS_MASK 0x0001 +#define RRD_ERR_FCS_SHIFT 21 +#define RRD_ERR_FAE_MASK 0x0001 +#define RRD_ERR_FAE_SHIFT 22 +#define RRD_ERR_TRUNC_MASK 0x0001 +#define RRD_ERR_TRUNC_SHIFT 23 +#define RRD_ERR_RUNT_MASK 0x0001 +#define RRD_ERR_RUNT_SHIFT 24 +#define RRD_ERR_ICMP_MASK 0x0001 +#define RRD_ERR_ICMP_SHIFT 25 +#define RRD_BCAST_MASK 0x0001 +#define 
RRD_BCAST_SHIFT 26 +#define RRD_MCAST_MASK 0x0001 +#define RRD_MCAST_SHIFT 27 +#define RRD_ETHTYPE_MASK 0x0001 +#define RRD_ETHTYPE_SHIFT 28 +#define RRD_ERR_FIFOV_MASK 0x0001 +#define RRD_ERR_FIFOV_SHIFT 29 +#define RRD_ERR_LEN_MASK 0x0001 +#define RRD_ERR_LEN_SHIFT 30 +#define RRD_UPDATED_MASK 0x0001 +#define RRD_UPDATED_SHIFT 31 + + +/* Statistics counters collected by the MAC */ +struct alx_hw_stats { + /* rx */ + unsigned long rx_ok; + unsigned long rx_bcast; + unsigned long rx_mcast; + unsigned long rx_pause; + unsigned long rx_ctrl; + unsigned long rx_fcs_err; + unsigned long rx_len_err; + unsigned long rx_byte_cnt; + unsigned long rx_runt; + unsigned long rx_frag; + unsigned long rx_sz_64B; + unsigned long rx_sz_127B; + unsigned long rx_sz_255B; + unsigned long rx_sz_511B; + unsigned long rx_sz_1023B; + unsigned long rx_sz_1518B; + unsigned long rx_sz_max; + unsigned long rx_ov_sz; + unsigned long rx_ov_rxf; + unsigned long rx_ov_rrd; + unsigned long rx_align_err; + unsigned long rx_bc_byte_cnt; + unsigned long rx_mc_byte_cnt; + unsigned long rx_err_addr; + + /* tx */ + unsigned long tx_ok; + unsigned long tx_bcast; + unsigned long tx_mcast; + unsigned long tx_pause; + unsigned long tx_exc_defer; + unsigned long tx_ctrl; + unsigned long tx_defer; + unsigned long tx_byte_cnt; + unsigned long tx_sz_64B; + unsigned long tx_sz_127B; + unsigned long tx_sz_255B; + unsigned long tx_sz_511B; + unsigned long tx_sz_1023B; + unsigned long tx_sz_1518B; + unsigned long tx_sz_max; + unsigned long tx_single_col; + unsigned long tx_multi_col; + unsigned long tx_late_col; + unsigned long tx_abort_col; + unsigned long tx_underrun; + unsigned long tx_trd_eop; + unsigned long tx_len_err; + unsigned long tx_trunc; + unsigned long tx_bc_byte_cnt; + unsigned long tx_mc_byte_cnt; + unsigned long update; +}; + +#define SPEED_0 0 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 +#define ALX_MAX_SETUP_LNK_CYCLE 50 + +#define ALX_SPEED_TO_ETHADV(_speed) (\ +(_speed) == SPEED_1000 + 
FULL_DUPLEX ? ADVERTISED_1000baseT_Full : \ +(_speed) == SPEED_100 + FULL_DUPLEX ? ADVERTISED_100baseT_Full : \ +(_speed) == SPEED_100 + HALF_DUPLEX ? ADVERTISED_10baseT_Half : \ +(_speed) == SPEED_10 + FULL_DUPLEX ? ADVERTISED_10baseT_Full : \ +(_speed) == SPEED_10 + HALF_DUPLEX ? ADVERTISED_10baseT_Half : \ +0) + +#define speed_desc(_s) (\ + (_s) == SPEED_1000 + FULL_DUPLEX ? \ + "1 Gbps Full" : \ + (_s) == SPEED_100 + FULL_DUPLEX ? \ + "100 Mbps Full" : \ + (_s) == SPEED_100 + HALF_DUPLEX ? \ + "100 Mbps Half" : \ + (_s) == SPEED_10 + FULL_DUPLEX ? \ + "10 Mbps Full" : \ + (_s) == SPEED_10 + HALF_DUPLEX ? \ + "10 Mbps Half" : \ + "Unknown speed") + +/* for FlowControl */ +#define ALX_FC_RX 0x01 +#define ALX_FC_TX 0x02 +#define ALX_FC_ANEG 0x04 + +/* for sleep control */ +#define ALX_SLEEP_WOL_PHY 0x00000001 +#define ALX_SLEEP_WOL_MAGIC 0x00000002 +#define ALX_SLEEP_CIFS 0x00000004 +#define ALX_SLEEP_ACTIVE (\ + ALX_SLEEP_WOL_PHY | \ + ALX_SLEEP_WOL_MAGIC | \ + ALX_SLEEP_CIFS) + +/* for RSS hash type */ +#define ALX_RSS_HASH_TYPE_IPV4 0x1 +#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2 +#define ALX_RSS_HASH_TYPE_IPV6 0x4 +#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8 +#define ALX_RSS_HASH_TYPE_ALL (\ + ALX_RSS_HASH_TYPE_IPV4 |\ + ALX_RSS_HASH_TYPE_IPV4_TCP |\ + ALX_RSS_HASH_TYPE_IPV6 |\ + ALX_RSS_HASH_TYPE_IPV6_TCP) +#define ALX_DEF_RXBUF_SIZE 1536 +#define ALX_MAX_JUMBO_PKT_SIZE (9*1024) +#define ALX_MAX_TSO_PKT_SIZE (7*1024) +#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE +#define ALX_MIN_FRAME_SIZE 68 +#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +#define ALX_MAX_RX_QUEUES 8 +#define ALX_MAX_TX_QUEUES 4 +#define ALX_MAX_HANDLED_INTRS 5 + +#define ALX_ISR_MISC (\ + ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | \ + ALX_ISR_DMAR | \ + ALX_ISR_SMB | \ + ALX_ISR_MANU | \ + ALX_ISR_TIMER) + +#define ALX_ISR_FATAL (\ + ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | \ + ALX_ISR_DMAR) + +#define ALX_ISR_ALERT (\ + ALX_ISR_RXF_OV | \ + ALX_ISR_TXF_UR | \ + 
ALX_ISR_RFD_UR) + +#define ALX_ISR_ALL_QUEUES (\ + ALX_ISR_TX_Q0 | \ + ALX_ISR_TX_Q1 | \ + ALX_ISR_TX_Q2 | \ + ALX_ISR_TX_Q3 | \ + ALX_ISR_RX_Q0 | \ + ALX_ISR_RX_Q1 | \ + ALX_ISR_RX_Q2 | \ + ALX_ISR_RX_Q3 | \ + ALX_ISR_RX_Q4 | \ + ALX_ISR_RX_Q5 | \ + ALX_ISR_RX_Q6 | \ + ALX_ISR_RX_Q7) + +/* maximum interrupt vectors for msix */ +#define ALX_MAX_MSIX_INTRS 16 + +#define FIELD_GETX(_x, _name) (((_x) >> (_name##_SHIFT)) & (_name##_MASK)) +#define FIELD_SETS(_x, _name, _v) (\ +(_x) = \ +((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\ +(((u16)(_v) & (_name##_MASK)) << (_name##_SHIFT))) +#define FIELD_SET32(_x, _name, _v) (\ +(_x) = \ +((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\ +(((_v) & (_name##_MASK)) << (_name##_SHIFT))) +#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT)) + +struct alx_hw { + void *pdev; + u8 __iomem *hw_addr; + + /* pci regs */ + u16 device_id; + u16 subdev_id; + u16 subven_id; + u8 revision; + + unsigned long capability; + + /* current & permanent mac addr */ + u8 mac_addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + u16 mtu; + u16 imt; + u8 dma_chnl; + u8 max_dma_chnl; + /* tpd threshold to trig INT */ + u32 ith_tpd; + u32 rx_ctrl; + u32 mc_hash[2]; + + u8 rss_key[40]; + u32 rss_idt[32]; + u16 rss_idt_size; + u8 rss_hash_type; + + /* weight round robin for multiple-tx-Q */ + u32 wrr[ALX_MAX_TX_QUEUES]; + /* prioirty control */ + u32 wrr_ctrl; + + /* interrupt mask for ALX_IMR */ + u32 imask; + u32 smb_timer; + bool link_up; + u16 link_speed; + u8 link_duplex; + + /* auto-neg advertisement or force mode config */ + u32 adv_cfg; + u8 flowctrl; + + struct alx_hw_stats hw_stats; + u32 sleep_ctrl; + /* sram address for pattern wol */ + u32 ptrn_ofs; + /* max patterns number */ + u16 max_ptrns; + + spinlock_t mdio_lock; + struct mdio_if_info mdio; + u16 phy_id[2]; + + struct alx_hw_stats stats; + /* PHY link patch flag */ + bool lnk_patch; + /* PHY hibernation patch flag */ + bool hib_patch; + /* FPGA or ASIC */ + bool is_fpga; +}; 
+ +#define ALX_DID(_hw) ((_hw)->device_id) +#define ALX_SUB_VID(_hw) ((_hw)->subven_id) +#define ALX_SUB_DID(_hw) ((_hw)->subdev_id) +#define ALX_REVID(_hw) ((_hw)->revision >> ALX_PCI_REVID_SHIFT) +#define ALX_WITH_CR(_hw) ((_hw)->revision & 1) + +enum ALX_CAPS { + ALX_CAP_GIGA = 0, + ALX_CAP_PTP, + ALX_CAP_AZ, + ALX_CAP_L0S, + ALX_CAP_L1, + ALX_CAP_SWOI, + ALX_CAP_RSS, + ALX_CAP_MSIX, + /* support Multi-TX-Q */ + ALX_CAP_MTQ, + /* support Multi-RX-Q */ + ALX_CAP_MRQ, +}; +#define ALX_CAP(_hw, _cap) (\ + test_bit(ALX_CAP_##_cap, &(_hw)->capability)) +#define ALX_CAP_SET(_hw, _cap) (\ + set_bit(ALX_CAP_##_cap, &(_hw)->capability)) +#define ALX_CAP_CLEAR(_hw, _cap) (\ + clear_bit(ALX_CAP_##_cap, &(_hw)->capability)) + +/* write to 8bit register via pci memory space */ +#define ALX_MEM_W8(s, reg, val) (writeb((val), ((s)->hw_addr + reg))) + +/* read from 8bit register via pci memory space */ +#define ALX_MEM_R8(s, reg, pdat) (\ + *(u8 *)(pdat) = readb((s)->hw_addr + reg)) + +/* write to 16bit register via pci memory space */ +#define ALX_MEM_W16(s, reg, val) (writew((val), ((s)->hw_addr + reg))) + +/* read from 16bit register via pci memory space */ +#define ALX_MEM_R16(s, reg, pdat) (\ + *(u16 *)(pdat) = readw((s)->hw_addr + reg)) + +/* write to 32bit register via pci memory space */ +#define ALX_MEM_W32(s, reg, val) (writel((val), ((s)->hw_addr + reg))) + +/* read from 32bit register via pci memory space */ +#define ALX_MEM_R32(s, reg, pdat) (\ + *(u32 *)(pdat) = readl((s)->hw_addr + reg)) + +/* read from 16bit register via pci config space */ +#define ALX_CFG_R16(s, reg, pdat) (\ + pci_read_config_word((s)->pdev, (reg), (pdat))) + +/* write to 16bit register via pci config space */ +#define ALX_CFG_W16(s, reg, val) (\ + pci_write_config_word((s)->pdev, (reg), (val))) + +/* flush regs */ +#define ALX_MEM_FLUSH(s) (readl((s)->hw_addr)) + + +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr); +void alx_add_mc_addr(struct alx_hw *hw, u8 *addr); +void 
alx_reset_phy(struct alx_hw *hw, bool hib_en); +void alx_reset_pcie(struct alx_hw *hw); +void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); +void alx_post_phy_link(struct alx_hw *hw, u16 speed, bool az_en); +int alx_pre_suspend(struct alx_hw *hw, u16 speed); +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); +int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata); +int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data); +int alx_get_phy_link(struct alx_hw *hw, bool *link_up, u16 *speed); +int alx_clear_phy_intr(struct alx_hw *hw); +int alx_config_wol(struct alx_hw *hw); +void alx_cfg_mac_fc(struct alx_hw *hw, u8 fc); +void alx_start_mac(struct alx_hw *hw); +int alx_stop_mac(struct alx_hw *hw); +int alx_reset_mac(struct alx_hw *hw); +void alx_set_macaddr(struct alx_hw *hw, u8 *addr); +bool alx_phy_configed(struct alx_hw *hw); +void alx_configure_basic(struct alx_hw *hw); +void alx_configure_rss(struct alx_hw *hw, bool en); +void alx_mask_msix(struct alx_hw *hw, int index, bool mask); +int alx_select_powersaving_speed(struct alx_hw *hw, u16 *speed); +void __alx_update_hw_stats(struct alx_hw *hw); +void __alx_start_phy_polling(struct alx_hw *hw, u16 clk_sel); + +#define alx_get_readrq(_hw) pcie_get_readrq((_hw)->pdev) +#define alx_set_readrq(_hw, _v) pcie_set_readrq((_hw)->pdev, _v) + + +/* some issues are relavant to specific platforms + * we assign those patches for the chip by pci device id + * vendor id, subsystem id and revision number + */ +struct alx_platform_patch { + u16 pci_did; + u8 pci_rev; + u16 subsystem_vid; + u16 subsystem_did; + u32 pflag; +}; +/* PHY link issue */ +#define ALX_PF_LINK 0x00001 +/* Hibernatation issue */ +#define 
ALX_PF_HIB 0x00002 +/* not care revision number */ +#define ALX_PF_ANY_REV 0x10000 + + +void alx_patch_assign(struct alx_hw *hw); +bool alx_get_phy_info(struct alx_hw *hw); + +#endif diff -Nurp linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_main.c linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_main.c --- linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_main.c 1970-01-01 02:00:00.000000000 +0200 +++ linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_main.c 2013-05-05 13:51:31.000000000 +0300 @@ -0,0 +1,2820 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/if_vlan.h> +#include <linux/mii.h> +#include <linux/mdio.h> +#include <linux/aer.h> +#include <linux/bitops.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> +#include <net/ip6_checksum.h> + +#include "alx_reg.h" +#include "alx_hw.h" +#include "alx.h" + +#define DRV_MAJ 1 +#define DRV_MIN 2 +#define DRV_PATCH 3 +#define DRV_MODULE_VER \ + __stringify(DRV_MAJ) "." __stringify(DRV_MIN) "." 
\ + __stringify(DRV_PATCH) + +char alx_drv_name[] = "alx"; +char alx_drv_version[] = DRV_MODULE_VER; +static const char alx_drv_desc[] = +"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver"; + +/* alx_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +#define ALX_ETHER_DEVICE(device_id) {\ + PCI_DEVICE(ALX_VENDOR_ID, device_id)} +static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { + ALX_ETHER_DEVICE(ALX_DEV_ID_AR8161), + ALX_ETHER_DEVICE(ALX_DEV_ID_AR8162), + ALX_ETHER_DEVICE(ALX_DEV_ID_AR8171), + ALX_ETHER_DEVICE(ALX_DEV_ID_AR8172), + {0,} +}; + +MODULE_DEVICE_TABLE(pci, alx_pci_tbl); +MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>"); +MODULE_DESCRIPTION("Qualcomm Atheros Gigabit Ethernet Driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_MODULE_VER); + +static int alx_poll(struct napi_struct *napi, int budget); +static irqreturn_t alx_msix_ring(int irq, void *data); +static irqreturn_t alx_intr_msix_misc(int irq, void *data); +static irqreturn_t alx_intr_msi(int irq, void *data); +static irqreturn_t alx_intr_legacy(int irq, void *data); +static void alx_init_ring_ptrs(struct alx_adapter *adpt); +static int alx_reinit_rings(struct alx_adapter *adpt); + +static inline void alx_schedule_work(struct alx_adapter *adpt) +{ + if (!ALX_FLAG(adpt, HALT)) + schedule_work(&adpt->task); +} + +static inline void alx_cancel_work(struct alx_adapter *adpt) +{ + cancel_work_sync(&adpt->task); +} + + +static void __alx_set_rx_mode(struct net_device *netdev) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + struct netdev_hw_addr *ha; + + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(ha, netdev) + alx_add_mc_addr(hw, ha->addr); + + ALX_MEM_W32(hw, ALX_HASH_TBL0, hw->mc_hash[0]); + 
ALX_MEM_W32(hw, ALX_HASH_TBL1, hw->mc_hash[1]); + + /* check for Promiscuous and All Multicast modes */ + hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); + if (netdev->flags & IFF_PROMISC) + hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; + if (netdev->flags & IFF_ALLMULTI) + hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; + + ALX_MEM_W32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +/* alx_set_rx_mode - Multicast and Promiscuous mode set */ +static void alx_set_rx_mode(struct net_device *netdev) +{ + __alx_set_rx_mode(netdev); +} + + +/* alx_set_mac - Change the Ethernet Address of the NIC */ +static int alx_set_mac_address(struct net_device *netdev, void *data) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + struct sockaddr *addr = data; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netdev->addr_assign_type & NET_ADDR_RANDOM) + netdev->addr_assign_type ^= NET_ADDR_RANDOM; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + alx_set_macaddr(hw, hw->mac_addr); + + return 0; +} + +static void alx_free_napis(struct alx_adapter *adpt) +{ + struct alx_napi *np; + int i; + + for (i = 0; i < adpt->nr_napi; i++) { + np = adpt->qnapi[i]; + if (!np) + continue; + + netif_napi_del(&np->napi); + kfree(np->txq); + np->txq = NULL; + kfree(np->rxq); + np->rxq = NULL; + adpt->qnapi[i] = NULL; + } +} +static u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX, + ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX}; +static u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX, + ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX}; +static u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1, + ALX_ISR_TX_Q2, ALX_ISR_TX_Q3}; +static u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1, + ALX_ISR_RX_Q2, ALX_ISR_RX_Q3, + ALX_ISR_RX_Q4, ALX_ISR_RX_Q5, + ALX_ISR_RX_Q6, ALX_ISR_RX_Q7}; +static int alx_alloc_napis(struct alx_adapter *adpt) +{ + struct alx_hw *hw = 
&adpt->hw; + struct alx_napi *np; + struct alx_rx_queue *rxq; + struct alx_tx_queue *txq; + int i; + + hw->imask &= ~ALX_ISR_ALL_QUEUES; + + /* alloc alx_napi */ + for (i = 0; i < adpt->nr_napi; i++) { + np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL); + if (!np) + goto err_out; + + np->adpt = adpt; + netif_napi_add(adpt->netdev, &np->napi, alx_poll, 64); + adpt->qnapi[i] = np; + } + + /* alloc tx queue */ + for (i = 0; i < adpt->nr_txq; i++) { + np = adpt->qnapi[i]; + txq = kzalloc(sizeof(struct alx_tx_queue), GFP_KERNEL); + if (!txq) + goto err_out; + np->txq = txq; + txq->p_reg = tx_pidx_reg[i]; + txq->c_reg = tx_cidx_reg[i]; + txq->count = adpt->tx_ringsz; + txq->qidx = (u16)i; + np->vec_mask |= tx_vect_mask[i]; + hw->imask |= tx_vect_mask[i]; + } + + /* alloc rx queue */ + for (i = 0; i < adpt->nr_rxq; i++) { + np = adpt->qnapi[i]; + rxq = kzalloc(sizeof(struct alx_rx_queue), GFP_KERNEL); + if (!rxq) + goto err_out; + np->rxq = rxq; + rxq->p_reg = ALX_RFD_PIDX; + rxq->c_reg = ALX_RFD_CIDX; + rxq->count = adpt->rx_ringsz; + rxq->qidx = (u16)i; + __skb_queue_head_init(&rxq->list); + np->vec_mask |= rx_vect_mask[i]; + hw->imask |= rx_vect_mask[i]; + } + + return 0; + +err_out: + alx_free_napis(adpt); + return -ENOMEM; +} + +static int alx_alloc_rings(struct alx_adapter *adpt) +{ + struct alx_buffer *bf; + u8 *desc; + dma_addr_t dma; + int i, size, offset; + + /* alx_buffer */ + size = sizeof(struct alx_buffer) * adpt->nr_txq * adpt->tx_ringsz + + sizeof(struct alx_buffer) * adpt->nr_hwrxq * adpt->rx_ringsz; + + bf = vzalloc(size); + if (!bf) + goto err_out; + + /* physical rx rings */ + size = sizeof(struct tpd_desc) * adpt->tx_ringsz * adpt->nr_txq + + (sizeof(struct rrd_desc) + sizeof(struct rfd_desc)) * + adpt->rx_ringsz * adpt->nr_hwrxq + + adpt->nr_txq * 8 + + adpt->nr_hwrxq * 8; + desc = dma_alloc_coherent(&adpt->pdev->dev, size, &dma, GFP_KERNEL); + if (!desc) + goto err_out; + + memset(desc, 0, size); + adpt->ring_header.desc = desc; + 
adpt->ring_header.dma = dma; + adpt->ring_header.size = size; + + size = sizeof(struct tpd_desc) * adpt->tx_ringsz; + for (i = 0; i < adpt->nr_txq; i++) { + offset = ALIGN(dma, 8) - dma; + desc += offset; + dma += offset; + adpt->qnapi[i]->txq->netdev = adpt->netdev; + adpt->qnapi[i]->txq->dev = &adpt->pdev->dev; + adpt->qnapi[i]->txq->tpd_hdr = (struct tpd_desc *)desc; + adpt->qnapi[i]->txq->tpd_dma = dma; + adpt->qnapi[i]->txq->count = adpt->tx_ringsz; + adpt->qnapi[i]->txq->bf_info = bf; + desc += size; + dma += size; + bf += adpt->tx_ringsz; + } + size = sizeof(struct rrd_desc) * adpt->rx_ringsz; + for (i = 0; i < adpt->nr_hwrxq; i++) { + offset = ALIGN(dma, 8) - dma; + desc += offset; + dma += offset; + adpt->qnapi[i]->rxq->rrd_hdr = (struct rrd_desc *)desc; + adpt->qnapi[i]->rxq->rrd_dma = dma; + adpt->qnapi[i]->rxq->bf_info = bf; + desc += size; + dma += size; + bf += adpt->rx_ringsz; + } + size = sizeof(struct rfd_desc) * adpt->rx_ringsz; + for (i = 0; i < adpt->nr_hwrxq; i++) { + offset = ALIGN(dma, 8) - dma; + desc += offset; + dma += offset; + adpt->qnapi[i]->rxq->rfd_hdr = (struct rfd_desc *)desc; + adpt->qnapi[i]->rxq->rfd_dma = dma; + desc += size; + dma += size; + } + for (i = 0; i < adpt->nr_rxq; i++) { + adpt->qnapi[i]->rxq->netdev = adpt->netdev; + adpt->qnapi[i]->rxq->dev = &adpt->pdev->dev; + adpt->qnapi[i]->rxq->count = adpt->rx_ringsz; + } + + return 0; + +err_out: + if (bf) + vfree(bf); + + return -ENOMEM; +} + +static void alx_free_rings(struct alx_adapter *adpt) +{ + struct alx_buffer *bf; + struct alx_napi *np; + + /* alx_buffer header is in the 1st tpdq->bf_info */ + np = adpt->qnapi[0]; + if (np) { + bf = np->txq->bf_info; + if (bf) { + vfree(bf); + np->txq->bf_info = NULL; + } + } + if (adpt->ring_header.desc) { + dma_free_coherent(&adpt->pdev->dev, + adpt->ring_header.size, + adpt->ring_header.desc, + adpt->ring_header.dma); + adpt->ring_header.desc = NULL; + } +} + +/* dequeue skb from RXQ, return true if the RXQ is empty */ +static 
inline bool alx_skb_dequeue_n(struct alx_rx_queue *rxq, int max_pkts, + struct sk_buff_head *list) +{ + struct alx_adapter *adpt = netdev_priv(rxq->netdev); + bool use_lock = !ALX_CAP(&adpt->hw, MRQ); + bool empty; + struct sk_buff *skb; + int count = 0; + + if (use_lock) + spin_lock(&rxq->list.lock); + + while (count < max_pkts || max_pkts == -1) { + skb = __skb_dequeue(&rxq->list); + if (skb) { + __skb_queue_tail(list, skb); + count++; + } else + break; + } + + empty = skb_queue_empty(&rxq->list); + + if (use_lock) + spin_unlock(&rxq->list.lock); + + netif_info(adpt, rx_status, adpt->netdev, + "RX %d packets\n", + count); + + return empty; +} + +static inline void alx_skb_queue_tail(struct alx_rx_queue *rxq, + struct sk_buff *skb) +{ + struct alx_adapter *adpt = netdev_priv(rxq->netdev); + bool use_lock = !ALX_CAP(&adpt->hw, MRQ); + + if (use_lock) + spin_lock(&rxq->list.lock); + + __skb_queue_tail(&rxq->list, skb); + + if (use_lock) + spin_unlock(&rxq->list.lock); +} + +int alx_alloc_rxring_buf(struct alx_adapter *adpt, + struct alx_rx_queue *rxq) +{ + struct sk_buff *skb; + struct alx_buffer *cur_buf; + struct rfd_desc *rfd; + dma_addr_t dma; + u16 cur, next, count = 0; + + next = cur = rxq->pidx; + if (++next == rxq->count) + next = 0; + cur_buf = rxq->bf_info + cur; + rfd = rxq->rfd_hdr + cur; + + while (!cur_buf->skb && next != rxq->cidx) { + skb = dev_alloc_skb(adpt->rxbuf_size); + if (unlikely(!skb)) { + netdev_warn(adpt->netdev, "alloc skb failed\n"); + break; + } + dma = dma_map_single(rxq->dev, + skb->data, + adpt->rxbuf_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(rxq->dev, dma)) { + netdev_warn(adpt->netdev, "mapping rx-buffer failed\n"); + dev_kfree_skb(skb); + break; + } + cur_buf->skb = skb; + dma_unmap_len_set(cur_buf, size, adpt->rxbuf_size); + dma_unmap_addr_set(cur_buf, dma, dma); + rfd->addr = cpu_to_le64(dma); + + cur = next; + if (++next == rxq->count) + next = 0; + cur_buf = rxq->bf_info + cur; + rfd = rxq->rfd_hdr + cur; + count++; + 
} + + if (count) { + wmb(); + rxq->pidx = cur; + ALX_MEM_W16(&adpt->hw, rxq->p_reg, (u16)cur); + } + + return count; +} + +static void alx_free_rxring_buf(struct alx_rx_queue *rxq) +{ + struct alx_buffer *cur_buf; + struct sk_buff_head list; + u16 i; + + if (rxq == NULL) + return; + + for (i = 0; i < rxq->count; i++) { + cur_buf = rxq->bf_info + i; + if (cur_buf->skb) { + dma_unmap_single(rxq->dev, + dma_unmap_addr(cur_buf, dma), + dma_unmap_len(cur_buf, size), + DMA_FROM_DEVICE); + dev_kfree_skb(cur_buf->skb); + cur_buf->skb = NULL; + dma_unmap_len_set(cur_buf, size, 0); + dma_unmap_addr_set(cur_buf, dma, 0); + } + } + + /* some skbs might be pending in the list */ + __skb_queue_head_init(&list); + alx_skb_dequeue_n(rxq, -1, &list); + while (!skb_queue_empty(&list)) { + struct sk_buff *skb; + + skb = __skb_dequeue(&list); + dev_kfree_skb(skb); + } + + rxq->pidx = 0; + rxq->cidx = 0; + rxq->rrd_cidx = 0; +} + +int alx_setup_all_ring_resources(struct alx_adapter *adpt) +{ + int err; + + err = alx_alloc_napis(adpt); + if (err) + goto out; + + err = alx_alloc_rings(adpt); + if (err) + goto out; + + err = alx_reinit_rings(adpt); + +out: + if (unlikely(err)) { + netif_err(adpt, ifup, adpt->netdev, + "setup_all_ring_resources fail %d\n", + err); + } + return err; +} + +static void alx_txbuf_unmap_and_free(struct alx_tx_queue *txq, int entry) +{ + struct alx_buffer *txb = txq->bf_info + entry; + + if (dma_unmap_len(txb, size) && + txb->flags & ALX_BUF_TX_FIRSTFRAG) { + dma_unmap_single(txq->dev, + dma_unmap_addr(txb, dma), + dma_unmap_len(txb, size), + DMA_TO_DEVICE); + txb->flags &= ~ALX_BUF_TX_FIRSTFRAG; + } else if (dma_unmap_len(txb, size)) { + dma_unmap_page(txq->dev, + dma_unmap_addr(txb, dma), + dma_unmap_len(txb, size), + DMA_TO_DEVICE); + } + if (txb->skb) { + dev_kfree_skb_any(txb->skb); + txb->skb = NULL; + } + dma_unmap_len_set(txb, size, 0); +} + +static void alx_free_txring_buf(struct alx_tx_queue *txq) +{ + int i; + + if (!txq->bf_info) + return; + + for (i 
= 0; i < txq->count; i++) + alx_txbuf_unmap_and_free(txq, i); + + memset(txq->bf_info, 0, txq->count * sizeof(struct alx_buffer)); + memset(txq->tpd_hdr, 0, txq->count * sizeof(struct tpd_desc)); + txq->pidx = 0; + atomic_set(&txq->cidx, 0); + + netdev_tx_reset_queue(netdev_get_tx_queue(txq->netdev, txq->qidx)); +} + +/* free up pending skb for tx/rx */ +static void alx_free_all_rings_buf(struct alx_adapter *adpt) +{ + int i; + + for (i = 0; i < adpt->nr_txq; i++) + if (adpt->qnapi[i]) + alx_free_txring_buf(adpt->qnapi[i]->txq); + + for (i = 0; i < adpt->nr_hwrxq; i++) + if (adpt->qnapi[i]) + alx_free_rxring_buf(adpt->qnapi[i]->rxq); +} + +void alx_free_all_ring_resources(struct alx_adapter *adpt) +{ + alx_free_all_rings_buf(adpt); + alx_free_rings(adpt); + alx_free_napis(adpt); +} + +static inline int alx_tpd_avail(struct alx_tx_queue *txq) +{ + u16 cidx = atomic_read(&txq->cidx); + + return txq->pidx >= cidx ? + txq->count + cidx - txq->pidx - 1 : + cidx - txq->pidx - 1; +} + + + +static bool alx_clean_tx_irq(struct alx_tx_queue *txq) +{ + struct alx_adapter *adpt = netdev_priv(txq->netdev); + struct netdev_queue *netque; + u16 hw_cidx, sw_cidx; + unsigned int total_bytes = 0, total_packets = 0; + int budget = ALX_DEFAULT_TX_WORK; + + if (ALX_FLAG(adpt, HALT)) + return true; + + netque = netdev_get_tx_queue(adpt->netdev, txq->qidx); + sw_cidx = atomic_read(&txq->cidx); + + ALX_MEM_R16(&adpt->hw, txq->c_reg, &hw_cidx); + + if (sw_cidx != hw_cidx) { + + netif_info(adpt, tx_done, adpt->netdev, + "TX[Q:%d, Preg:%x]: cons = 0x%x, hw-cons = 0x%x\n", + txq->qidx, txq->p_reg, sw_cidx, hw_cidx); + + while (sw_cidx != hw_cidx && budget > 0) { + struct sk_buff *skb; + + skb = txq->bf_info[sw_cidx].skb; + if (skb) { + total_bytes += skb->len; + total_packets++; + budget--; + } + alx_txbuf_unmap_and_free(txq, sw_cidx); + if (++sw_cidx == txq->count) + sw_cidx = 0; + } + atomic_set(&txq->cidx, sw_cidx); + + netdev_tx_completed_queue(netque, total_packets, total_bytes); + } + + 
if (unlikely(netif_tx_queue_stopped(netque) &&
+		     netif_carrier_ok(adpt->netdev) &&
+		     alx_tpd_avail(txq) > ALX_TX_WAKEUP_THRESH(txq) &&
+		     !ALX_FLAG(adpt, HALT))) {
+		netif_tx_wake_queue(netque);
+	}
+
+	return sw_cidx == hw_cidx;
+}
+
+/* Walk the RRD ring, unmap completed buffers and queue the resulting
+ * skbs onto the per-queue skb list (RSS-dispatched when MRQ is off).
+ */
+static bool alx_dispatch_skb(struct alx_rx_queue *rxq)
+{
+	struct alx_adapter *adpt = netdev_priv(rxq->netdev);
+	struct rrd_desc *rrd;
+	struct alx_buffer *rxb;
+	struct sk_buff *skb;
+	u16 length, rfd_cleaned = 0;
+	struct alx_rx_queue *tmp_rxq;
+	int qnum;
+
+	if (test_and_set_bit(ALX_RQ_USING, &rxq->flag))
+		return false;
+
+	while (1) {
+		rrd = rxq->rrd_hdr + rxq->rrd_cidx;
+		if (!(rrd->word3 & (1 << RRD_UPDATED_SHIFT)))
+			break;
+		rrd->word3 &= ~(1 << RRD_UPDATED_SHIFT);
+
+		if (unlikely(FIELD_GETX(rrd->word0, RRD_SI) != rxq->cidx ||
+			     FIELD_GETX(rrd->word0, RRD_NOR) != 1)) {
+			netif_err(adpt, rx_err, adpt->netdev,
+				  "wrong SI/NOR packet! rrd->word0= %08x\n",
+				  rrd->word0);
+			/* reset chip */
+			ALX_FLAG_SET(adpt, TASK_RESET);
+			alx_schedule_work(adpt);
+			return true;
+		}
+		rxb = rxq->bf_info + rxq->cidx;
+		dma_unmap_single(rxq->dev,
+				 dma_unmap_addr(rxb, dma),
+				 dma_unmap_len(rxb, size),
+				 DMA_FROM_DEVICE);
+		dma_unmap_len_set(rxb, size, 0);
+		skb = rxb->skb;
+		rxb->skb = NULL;
+
+		if (unlikely(rrd->word3 & (1 << RRD_ERR_RES_SHIFT) ||
+			     rrd->word3 & (1 << RRD_ERR_LEN_SHIFT))) {
+			netdev_warn(adpt->netdev,
+				    "wrong packet! rrd->word3 is %08x\n",
+				    rrd->word3);
+			rrd->word3 = 0;
+			dev_kfree_skb_any(skb);
+			goto next_pkt;
+		}
+		length = FIELD_GETX(rrd->word3, RRD_PKTLEN) - ETH_FCS_LEN;
+		skb_put(skb, length);
+		skb->protocol = eth_type_trans(skb, adpt->netdev);
+		/* checksum */
+		skb_checksum_none_assert(skb);
+		if (adpt->netdev->features & NETIF_F_RXCSUM) {
+			switch (FIELD_GETX(rrd->word2, RRD_PID)) {
+			case RRD_PID_IPV6UDP:
+			case RRD_PID_IPV4UDP:
+			case RRD_PID_IPV4TCP:
+			case RRD_PID_IPV6TCP:
+				if (rrd->word3 & ((1 << RRD_ERR_L4_SHIFT) |
+						  (1 << RRD_ERR_IPV4_SHIFT))) {
+					netdev_warn(
+						adpt->netdev,
+						"rx-chksum error, w2=%X\n",
+						rrd->word2);
+					break;
+				}
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				break;
+			}
+		}
+		/* vlan tag.
+		 * Fix: the TCI was byte-swapped twice (ntohs() on the field
+		 * and again when passed to __vlan_hwaccel_put_tag), which is
+		 * a no-op and delivered the tag in raw descriptor order.
+		 * Swap exactly once.
+		 */
+		if (rrd->word3 & (1 << RRD_VLTAGGED_SHIFT)) {
+			u16 tag = ntohs(FIELD_GETX(rrd->word2, RRD_VLTAG));
+			__vlan_hwaccel_put_tag(skb, tag);
+		}
+		qnum = FIELD_GETX(rrd->word2, RRD_RSSQ) % adpt->nr_rxq;
+		tmp_rxq = ALX_CAP(&adpt->hw, MRQ) ?
+			rxq : adpt->qnapi[qnum]->rxq;
+		alx_skb_queue_tail(tmp_rxq, skb);
+
+next_pkt:
+
+		if (++rxq->cidx == rxq->count)
+			rxq->cidx = 0;
+		if (++rxq->rrd_cidx == rxq->count)
+			rxq->rrd_cidx = 0;
+
+		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
+			rfd_cleaned -= alx_alloc_rxring_buf(adpt, rxq);
+	}
+
+	if (rfd_cleaned)
+		alx_alloc_rxring_buf(adpt, rxq);
+
+	clear_bit(ALX_RQ_USING, &rxq->flag);
+
+	return true;
+}
+
+static inline struct napi_struct *alx_rxq_to_napi(
+	struct alx_rx_queue *rxq)
+{
+	struct alx_adapter *adpt = netdev_priv(rxq->netdev);
+
+	return &adpt->qnapi[rxq->qidx]->napi;
+}
+
+static bool alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
+{
+	struct sk_buff_head list;
+	bool empty;
+
+	__skb_queue_head_init(&list);
+	alx_dispatch_skb(alx_hw_rxq(rxq));
+	empty = alx_skb_dequeue_n(rxq, budget, &list);
+	if (!skb_queue_empty(&list)) {
+		struct napi_struct *napi;
+		struct sk_buff *skb;
+
+		napi = alx_rxq_to_napi(rxq);
+		while (!skb_queue_empty(&list)) {
+			skb = __skb_dequeue(&list);
+
napi_gro_receive(napi, skb); + } + } else { + struct alx_adapter *adpt = netdev_priv(rxq->netdev); + + netif_info(adpt, rx_status, adpt->netdev, + "no packet received for this rxQ\n"); + } + + + return empty; +} + +static int alx_request_msix(struct alx_adapter *adpt) +{ + struct net_device *netdev = adpt->netdev; + int i, err; + int vec; + + err = request_irq(adpt->msix_ent[0].vector, + alx_intr_msix_misc, 0, netdev->name, adpt); + if (err) + goto out; + + vec = 1; + for (i = 0; i < adpt->nr_napi; i++) { + struct alx_napi *np = adpt->qnapi[i]; + + if (np->txq && np->rxq) + sprintf(np->irq_lbl, "%s-TR-%u", netdev->name, i); + else if (np->txq) + sprintf(np->irq_lbl, "%s-T-%u", netdev->name, i); + else + sprintf(np->irq_lbl, "%s-R-%u", netdev->name, i); + + np->vec_idx = vec; + err = request_irq(adpt->msix_ent[vec].vector, + alx_msix_ring, 0, np->irq_lbl, np); + if (err) { + for (i--, vec--; i >= 0; i--) { + np = adpt->qnapi[i]; + free_irq(adpt->msix_ent[vec].vector, np); + } + free_irq(adpt->msix_ent[0].vector, adpt); + goto out; + } + vec++; + } + +out: + return err; +} + +static void alx_disable_msix(struct alx_adapter *adpt) +{ + if (adpt->msix_ent) { + pci_disable_msix(adpt->pdev); + kfree(adpt->msix_ent); + adpt->msix_ent = NULL; + } + ALX_FLAG_CLEAR(adpt, USING_MSIX); +} + +static void alx_disable_msi(struct alx_adapter *adpt) +{ + if (ALX_FLAG(adpt, USING_MSI)) { + pci_disable_msi(adpt->pdev); + ALX_FLAG_CLEAR(adpt, USING_MSI); + } +} + +static int txq_vec_mapping_shift[] = { + 0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT, + 0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT, + 1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT, + 1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT, +}; +static int rxq_vec_mapping_shift[] = { + 0, ALX_MSI_MAP_TBL1_RXQ0_SHIFT, + 0, ALX_MSI_MAP_TBL1_RXQ1_SHIFT, + 0, ALX_MSI_MAP_TBL1_RXQ2_SHIFT, + 0, ALX_MSI_MAP_TBL1_RXQ3_SHIFT, + 1, ALX_MSI_MAP_TBL2_RXQ4_SHIFT, + 1, ALX_MSI_MAP_TBL2_RXQ5_SHIFT, + 1, ALX_MSI_MAP_TBL2_RXQ6_SHIFT, + 1, ALX_MSI_MAP_TBL2_RXQ7_SHIFT, +}; +static void 
alx_config_vector_mapping(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + u32 tbl[2]; + int vect, idx, shft; + int i; + + tbl[0] = tbl[1] = 0; + + if (ALX_FLAG(adpt, USING_MSIX)) { + for (vect = 1, i = 0; i < adpt->nr_txq; i++, vect++) { + idx = txq_vec_mapping_shift[i * 2]; + shft = txq_vec_mapping_shift[i * 2 + 1]; + tbl[idx] |= vect << shft; + } + for (vect = 1, i = 0; i < adpt->nr_rxq; i++, vect++) { + idx = rxq_vec_mapping_shift[i * 2]; + shft = rxq_vec_mapping_shift[i * 2 + 1]; + tbl[idx] |= vect << shft; + } + } + ALX_MEM_W32(hw, ALX_MSI_MAP_TBL1, tbl[0]); + ALX_MEM_W32(hw, ALX_MSI_MAP_TBL2, tbl[1]); + ALX_MEM_W32(hw, ALX_MSI_ID_MAP, 0); +} + +void alx_disable_advanced_intr(struct alx_adapter *adpt) +{ + alx_disable_msix(adpt); + alx_disable_msi(adpt); + + /* clear vector/intr-event mapping */ + alx_config_vector_mapping(adpt); +} + +static void alx_irq_enable(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + int i; + + if (!atomic_dec_and_test(&adpt->irq_sem)) + return; + + /* level-1 interrupt switch */ + ALX_MEM_W32(hw, ALX_ISR, 0); + ALX_MEM_W32(hw, ALX_IMR, hw->imask); + ALX_MEM_FLUSH(hw); + + if (!ALX_FLAG(adpt, USING_MSIX)) + return; + + /* enable all individual MSIX IRQs */ + for (i = 0; i < adpt->nr_vec; i++) + alx_mask_msix(hw, i, false); +} + +static void alx_irq_disable(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + int i; + + atomic_inc(&adpt->irq_sem); + + ALX_MEM_W32(hw, ALX_ISR, ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_IMR, 0); + ALX_MEM_FLUSH(hw); + + if (ALX_FLAG(adpt, USING_MSIX)) { + for (i = 0; i < adpt->nr_vec; i++) { + alx_mask_msix(hw, i, true); + synchronize_irq(adpt->msix_ent[i].vector); + } + } else { + synchronize_irq(adpt->pdev->irq); + } +} + +static int alx_request_irq(struct alx_adapter *adpt) +{ + struct pci_dev *pdev = adpt->pdev; + struct alx_hw *hw = &adpt->hw; + int err; + u32 msi_ctrl; + + msi_ctrl = FIELDX(ALX_MSI_RETRANS_TM, hw->imt >> 1); + + if (ALX_FLAG(adpt, USING_MSIX)) { 
+ ALX_MEM_W32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl); + err = alx_request_msix(adpt); + if (!err) + goto out; + /* fall back to MSI or legacy interrupt mode, + * re-alloc all resources + */ + alx_free_all_ring_resources(adpt); + alx_disable_msix(adpt); + adpt->nr_rxq = 1; + adpt->nr_txq = 1; + adpt->nr_napi = 1; + adpt->nr_vec = 1; + adpt->nr_hwrxq = 1; + alx_configure_rss(hw, false); + if (!pci_enable_msi(pdev)) + ALX_FLAG_SET(adpt, USING_MSI); + + err = alx_setup_all_ring_resources(adpt); + if (err) + goto out; + } + + if (ALX_FLAG(adpt, USING_MSI)) { + ALX_MEM_W32(hw, ALX_MSI_RETRANS_TIMER, + msi_ctrl | ALX_MSI_MASK_SEL_LINE); + err = request_irq(pdev->irq, alx_intr_msi, 0, + adpt->netdev->name, adpt); + if (!err) + goto out; + /* fall back to legacy interrupt */ + alx_disable_msi(adpt); + } + + ALX_MEM_W32(hw, ALX_MSI_RETRANS_TIMER, 0); + err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED, + adpt->netdev->name, adpt); + + if (err) + netif_err(adpt, intr, adpt->netdev, + "request shared irq failed, err = %d\n", + err); + +out: + if (likely(!err)) { + alx_config_vector_mapping(adpt); + + netif_info(adpt, drv, adpt->netdev, + "nr_rxq=%d, nr_txq=%d, nr_napi=%d, nr_vec=%d\n", + adpt->nr_rxq, adpt->nr_txq, + adpt->nr_napi, adpt->nr_vec); + netif_info(adpt, drv, adpt->netdev, + "flags=%lX, Interrupt Mode: %s\n", + adpt->flags, + ALX_FLAG(adpt, USING_MSIX) ? "MSIX" : + ALX_FLAG(adpt, USING_MSI) ? 
"MSI" : "INTx"); + } else + netdev_err(adpt->netdev, + "register IRQ fail %d\n", + err); + + return err; +} + +static void alx_free_irq(struct alx_adapter *adpt) +{ + struct pci_dev *pdev = adpt->pdev; + int i, vec; + + if (ALX_FLAG(adpt, USING_MSIX)) { + free_irq(adpt->msix_ent[0].vector, adpt); + vec = 1; + for (i = 0; i < adpt->nr_napi; i++, vec++) + free_irq(adpt->msix_ent[vec].vector, adpt->qnapi[i]); + } else { + free_irq(pdev->irq, adpt); + } + alx_disable_advanced_intr(adpt); +} + + +static int alx_identify_hw(struct alx_adapter *adpt) +{ + struct pci_dev *pdev = adpt->pdev; + struct alx_hw *hw = &adpt->hw; + int rev; + int err = -EINVAL; + + hw->device_id = pdev->device; + hw->subdev_id = pdev->subsystem_device; + hw->subven_id = pdev->subsystem_vendor; + hw->revision = pdev->revision; + rev = ALX_REVID(hw); + + switch (ALX_DID(hw)) { + case ALX_DEV_ID_AR8161: + case ALX_DEV_ID_AR8162: + case ALX_DEV_ID_AR8171: + case ALX_DEV_ID_AR8172: + if (rev > ALX_REV_C0) + break; + err = 0; + ALX_CAP_SET(hw, L0S); + ALX_CAP_SET(hw, L1); + ALX_CAP_SET(hw, MTQ); + ALX_CAP_SET(hw, RSS); + ALX_CAP_SET(hw, MSIX); + ALX_CAP_SET(hw, SWOI); + hw->max_dma_chnl = rev >= ALX_REV_B0 ? 
4 : 2;
		if (rev < ALX_REV_C0) {
			/* wake-on-lan pattern area moved/grew in rev C0 */
			hw->ptrn_ofs = 0x600;
			hw->max_ptrns = 8;
		} else {
			hw->ptrn_ofs = 0x14000;
			hw->max_ptrns = 16;
		}
		break;
	}

	/* NOTE(review): odd device-IDs appear to be the gigabit parts —
	 * confirm against alx_reg.h ID list */
	if (!err && ALX_DID(hw) & 1)
		ALX_CAP_SET(hw, GIGA);

	return err;
}


/* default Toeplitz RSS hash key loaded into the chip */
static const u8 def_rss_key[40] = {
	0xE2, 0x91, 0xD7, 0x3D, 0x18, 0x05, 0xEC, 0x6C,
	0x2A, 0x94, 0xB3, 0x0D, 0xA5, 0x4F, 0x2B, 0xEC,
	0xEA, 0x49, 0xAF, 0x7C, 0xE2, 0x14, 0xAD, 0x3D,
	0xB8, 0x55, 0xAA, 0xBE, 0x6A, 0x3E, 0x67, 0xEA,
	0x14, 0x36, 0x4D, 0x17, 0x3B, 0xED, 0x20, 0x0D,
};

/* fill the RSS indirection table with the ethtool default spread,
 * packed 8 entries (4 bits each) per 32-bit word of hw->rss_idt.
 */
void alx_init_def_rss_idt(struct alx_adapter *adpt)
{
	struct alx_hw *hw = &adpt->hw;
	int i, x, y;
	u32 val;

	for (i = 0; i < hw->rss_idt_size; i++) {
		val = ethtool_rxfh_indir_default(i, adpt->nr_rxq);
		x = i >> 3;		/* word index: 8 nibbles per u32 */
		y = i * 4 & 0x1F;	/* bit offset within the word */
		hw->rss_idt[x] &= ~(0xF << y);
		hw->rss_idt[x] |= (val & 0xF) << y;
	}
}

/* alx_init_adapter -
 * initialize general software structure (struct alx_adapter).
 * fields are inited based on PCI device information.
 */
static int alx_init_sw(struct alx_adapter *adpt)
{
	struct pci_dev *pdev = adpt->pdev;
	struct alx_hw *hw = &adpt->hw;
	int i, err;

	err = alx_identify_hw(adpt);
	if (err) {
		dev_err(&pdev->dev, "unrecognize the chip, aborting\n");
		return err;
	}

	/* assign patch flag for specific platforms */
	alx_patch_assign(hw);

	/* driver defaults: ring sizes, interrupt moderation (imt),
	 * advertised link modes, flow control, MAC rx control word
	 */
	memcpy(hw->rss_key, def_rss_key, sizeof(def_rss_key));
	hw->rss_idt_size = 128;
	hw->rss_hash_type = ALX_RSS_HASH_TYPE_ALL;
	hw->smb_timer = 400;
	hw->mtu = adpt->netdev->mtu;
	adpt->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
	adpt->tx_ringsz = 256;
	adpt->rx_ringsz = 512;
	hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
	hw->imt = 200;
	hw->imask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = adpt->tx_ringsz / 3;
	hw->link_up = false;
	hw->link_duplex = 0;
	hw->link_speed = SPEED_0;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
	hw->wrr_ctrl = ALX_WRR_PRI_RESTRICT_NONE;
	for (i = 0; i < ARRAY_SIZE(hw->wrr); i++)
		hw->wrr[i] = 4;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      FIELDX(ALX_MAC_CTRL_PRMBLEN, 7);
	hw->is_fpga = false;

	/* start with interrupts logically disabled and device halted;
	 * alx_irq_enable() decrements irq_sem to 0 before unmasking
	 */
	atomic_set(&adpt->irq_sem, 1);
	ALX_FLAG_SET(adpt, HALT);

	return err;
}


/* mirror the VLAN-strip feature bit into the MAC control register */
static void alx_set_vlan_mode(struct alx_hw *hw,
			      netdev_features_t features)
{
	if (features & NETIF_F_HW_VLAN_RX)
		hw->rx_ctrl |= ALX_MAC_CTRL_VLANSTRIP;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_VLANSTRIP;

	ALX_MEM_W32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}


static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	/* hardware TSO cannot handle frames beyond this size */
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}


/* only VLAN-strip changes need a register update; other feature bits
 * are consumed directly by the stack/datapath
 */
static int alx_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	struct alx_adapter *adpt = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;

	if (!(changed & NETIF_F_HW_VLAN_RX))
		return 0;

	alx_set_vlan_mode(&adpt->hw, features);

	return 0;
}

/* alx_change_mtu - Change the Maximum Transfer Unit */
static int alx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct alx_adapter *adpt = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
	    (max_frame > ALX_MAX_FRAME_SIZE)) {
		netif_err(adpt, hw, netdev,
			  "invalid MTU setting (%x)\n",
			  new_mtu);
		return -EINVAL;
	}
	/* set MTU */
	if (old_mtu != new_mtu) {
		netif_info(adpt, drv, adpt->netdev,
			   "changing MTU from %d to %d\n",
			   netdev->mtu, new_mtu);
		netdev->mtu = new_mtu;
		adpt->hw.mtu = new_mtu;
		adpt->rxbuf_size = new_mtu > ALX_DEF_RXBUF_SIZE ?
				   ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
		netdev_update_features(netdev);
		/* rx buffers must be re-sized: full restart if running */
		if (netif_running(netdev))
			alx_reinit(adpt, false);
	}

	return 0;
}

/* configure hardware everything except:
 * 1. interrupt vectors
 * 2.
 enable control for rx modules
 */
void alx_configure(struct alx_adapter *adpt)
{
	struct alx_hw *hw = &adpt->hw;

	alx_configure_basic(hw);
	alx_configure_rss(hw, adpt->nr_rxq > 1);
	__alx_set_rx_mode(adpt->netdev);
	alx_set_vlan_mode(hw, adpt->netdev->features);
}

/* quiesce the stack side: stop tx queues and all napi polling */
static void alx_netif_stop(struct alx_adapter *adpt)
{
	int i;

	/* refresh trans_start so the watchdog doesn't fire while down */
	adpt->netdev->trans_start = jiffies;
	if (netif_carrier_ok(adpt->netdev)) {
		netif_carrier_off(adpt->netdev);
		netif_tx_disable(adpt->netdev);
		for (i = 0; i < adpt->nr_napi; i++)
			napi_disable(&adpt->qnapi[i]->napi);
	}
}

/* counterpart of alx_netif_stop(): wake queues, napi, carrier */
static void alx_netif_start(struct alx_adapter *adpt)
{
	int i;

	netif_tx_wake_all_queues(adpt->netdev);
	for (i = 0; i < adpt->nr_napi; i++)
		napi_enable(&adpt->qnapi[i]->napi);
	netif_carrier_on(adpt->netdev);
}

/* try to switch to MSI-X multi-queue mode.
 * Returns false (leaving single-vector mode) if the host has too few
 * CPUs to benefit, or if allocation/enable fails.
 */
static bool alx_enable_msix(struct alx_adapter *adpt)
{
	int nr_txq, nr_rxq, vec_req;
	int i, err;

	nr_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
	nr_rxq = min_t(int, num_online_cpus(), ALX_MAX_RX_QUEUES);
	nr_rxq = rounddown_pow_of_two(nr_rxq);
	/* one more vector for PHY link change & timer & other events */
	vec_req = max_t(int, nr_txq, nr_rxq) + 1;

	if (vec_req <= 2) {
		netif_info(adpt, intr, adpt->netdev,
			   "cpu core num is less, MSI-X isn't necessary\n");
		return false;
	}

	adpt->msix_ent = kcalloc(vec_req,
				 sizeof(struct msix_entry),
				 GFP_KERNEL);
	if (!adpt->msix_ent) {
		netif_warn(adpt, intr, adpt->netdev,
			   "can't alloc msix entries\n");
		return false;
	}
	for (i = 0; i < vec_req; i++)
		adpt->msix_ent[i].entry = i;

	/* all-or-nothing: a partial grant (err > 0) is treated as failure */
	err = pci_enable_msix(adpt->pdev, adpt->msix_ent, vec_req);
	if (err) {
		kfree(adpt->msix_ent);
		adpt->msix_ent = NULL;
		netif_warn(adpt, intr, adpt->netdev,
			   "can't enable MSI-X interrupt\n");
		return false;
	}

	adpt->nr_txq = nr_txq;
	adpt->nr_rxq = nr_rxq;
	adpt->nr_vec = vec_req;
	adpt->nr_napi = vec_req - 1;
	adpt->nr_hwrxq = ALX_CAP(&adpt->hw, MRQ) ?
			adpt->nr_rxq : 1;

	return true;
}

/* pick the interrupt mode (MSI-X > MSI > INTx) and the matching
 * queue/napi/vector counts before ring resources are allocated
 */
void alx_init_intr(struct alx_adapter *adpt)
{
	struct alx_hw *hw = &adpt->hw;

	if ((ALX_CAP(hw, MTQ) || ALX_CAP(hw, RSS)) && ALX_CAP(hw, MSIX)) {
		if (alx_enable_msix(adpt))
			ALX_FLAG_SET(adpt, USING_MSIX);
	}
	if (!ALX_FLAG(adpt, USING_MSIX)) {
		adpt->nr_txq = 1;
		adpt->nr_rxq = 1;
		adpt->nr_napi = 1;
		adpt->nr_vec = 1;
		adpt->nr_hwrxq = 1;

		if (!pci_enable_msi(adpt->pdev))
			ALX_FLAG_SET(adpt, USING_MSI);
	}
}

/* bring the interface up; @resume indicates a PM resume path where
 * the stack state (carrier/queues) is restored rather than created
 */
static int __alx_open(struct alx_adapter *adpt, bool resume)
{
	int err;

	/* decide interrupt mode, some resources allocation depend on it */
	alx_init_intr(adpt);

	/* init rss indirection table */
	alx_init_def_rss_idt(adpt);

	if (!resume)
		netif_carrier_off(adpt->netdev);

	/* allocate all memory resources */
	err = alx_setup_all_ring_resources(adpt);
	if (err)
		goto err_out;

	/* make hardware ready before allocate interrupt */
	alx_configure(adpt);

	err = alx_request_irq(adpt);
	if (err)
		goto err_out;

	/* netif_set_real_num_tx/rx_queues need rtnl_lock held */
	if (resume)
		rtnl_lock();
	netif_set_real_num_tx_queues(adpt->netdev, adpt->nr_txq);
	netif_set_real_num_rx_queues(adpt->netdev, adpt->nr_rxq);
	if (resume)
		rtnl_unlock();

	ALX_FLAG_CLEAR(adpt, HALT);

	/* clear old interrupts */
	ALX_MEM_W32(&adpt->hw, ALX_ISR, (u32)~ALX_ISR_DIS);

	alx_irq_enable(adpt);

	if (!resume)
		netif_tx_start_all_queues(adpt->netdev);

	/* kick the worker to detect the initial link state */
	ALX_FLAG_SET(adpt, TASK_CHK_LINK);
	alx_schedule_work(adpt);
	return 0;

err_out:

	alx_free_all_ring_resources(adpt);
	alx_disable_advanced_intr(adpt);
	return err;
}

/* stop all activity: mark HALT, silence napi/tx, reset the MAC and
 * drop every queued buffer. @in_task is true when called from the
 * worker itself (must not cancel the work we are running in).
 */
static void alx_halt(struct alx_adapter *adpt, bool in_task)
{
	struct alx_hw *hw = &adpt->hw;

	ALX_FLAG_SET(adpt, HALT);
	if (!in_task)
		alx_cancel_work(adpt);

	alx_netif_stop(adpt);
	hw->link_up = false;
	hw->link_speed = SPEED_0;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);

	alx_irq_disable(adpt);
	alx_free_all_rings_buf(adpt);
}

/* undo alx_halt(): rebuild rings, reprogram the chip, re-arm irqs */
static void alx_activate(struct alx_adapter *adpt)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(adpt);
	alx_configure(adpt);

	ALX_FLAG_CLEAR(adpt, HALT);
	/* clear old interrupts */
	ALX_MEM_W32(&adpt->hw, ALX_ISR, (u32)~ALX_ISR_DIS);

	alx_irq_enable(adpt);

	ALX_FLAG_SET(adpt, TASK_CHK_LINK);
	alx_schedule_work(adpt);
}

static void __alx_stop(struct alx_adapter *adpt)
{
	alx_halt(adpt, false);

	alx_free_irq(adpt);

	alx_free_all_ring_resources(adpt);
}

/* write all ring base addresses/sizes into the chip and reset the
 * software producer/consumer indices
 */
static void alx_init_ring_ptrs(struct alx_adapter *adpt)
{
	struct alx_hw *hw = &adpt->hw;
	struct alx_napi *np;
	int i, tx_idx, rx_idx;
	u32 addr_hi;
	u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO, ALX_TPD_PRI1_ADDR_LO,
				   ALX_TPD_PRI2_ADDR_LO, ALX_TPD_PRI3_ADDR_LO};
	u16 rfdring_header_reg[] = {ALX_RFD_ADDR_LO};
	u16 rrdring_header_reg[] = {ALX_RRD_ADDR_LO};

	tx_idx = 0;
	rx_idx = 0;
	for (i = 0; i < adpt->nr_napi; i++) {
		np = adpt->qnapi[i];
		if (np->rxq) {
			np->rxq->pidx = 0;
			np->rxq->cidx = 0;
			np->rxq->rrd_cidx = 0;
			/* without MRQ only queue 0 is a real hw queue */
			if (!ALX_CAP(hw, MRQ) && rx_idx == 0) {
				ALX_MEM_W32(hw, rfdring_header_reg[0],
					    np->rxq->rfd_dma);
				ALX_MEM_W32(hw, rrdring_header_reg[0],
					    np->rxq->rrd_dma);
			}
			rx_idx++;
		}
		if (np->txq) {
			np->txq->pidx = 0;
			atomic_set(&np->txq->cidx, 0);
			ALX_MEM_W32(hw, txring_header_reg[tx_idx],
				    np->txq->tpd_dma);
			tx_idx++;
		}
	}

	/* all rings share one high-32bit base (single coherent block) */
	addr_hi = ((u64)adpt->ring_header.dma) >> 32;
	ALX_MEM_W32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	ALX_MEM_W32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	ALX_MEM_W32(hw, ALX_TPD_RING_SZ, adpt->tx_ringsz);
	ALX_MEM_W32(hw, ALX_RRD_RING_SZ, adpt->rx_ringsz);
	ALX_MEM_W32(hw, ALX_RFD_RING_SZ, adpt->rx_ringsz);
	ALX_MEM_W32(hw, ALX_RFD_BUF_SZ, adpt->rxbuf_size);

	/* load these ptrs into chip internal */
	ALX_MEM_W32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

/* log the negotiated link speed/duplex */
static void alx_show_speed(struct alx_adapter *adpt, u16 speed)
{

	netif_info(adpt, link, adpt->netdev,
		   "NIC Link Up: %s\n",
		   speed_desc(speed));
}

/* reprogram ring pointers and refill every hardware rx queue;
 * returns -ENOMEM if a queue could not be given any buffers
 */
static int alx_reinit_rings(struct alx_adapter *adpt)
{
	int i, err = 0;

	alx_free_all_rings_buf(adpt);

	/* set rings' header to HW register */
	alx_init_ring_ptrs(adpt);

	/* alloc hw-rxing buf */
	for (i = 0; i < adpt->nr_hwrxq; i++) {
		int count;

		count = alx_alloc_rxring_buf(adpt, adpt->qnapi[i]->rxq);
		if (unlikely(!count)) {
			err = -ENOMEM;
			break;
		}
	}

	return err;
}



/* worker-context link monitor: reads the PHY, handles up/down
 * transitions, and schedules a full reset on failure.
 */
static void alx_check_link(struct alx_adapter *adpt)
{
	struct alx_hw *hw = &adpt->hw;
	u16 speed, old_speed;
	bool link_up, old_link_up;
	int err;

	if (ALX_FLAG(adpt, HALT))
		return;

	/* clear PHY internal interrupt status,
	 * otherwise the Main interrupt status will be asserted
	 * for ever.
	 */
	alx_clear_phy_intr(hw);

	err = alx_get_phy_link(hw, &link_up, &speed);
	if (err)
		goto out;

	/* re-enable the PHY interrupt masked off by the irq handler */
	hw->imask |= ALX_ISR_PHY;
	ALX_MEM_W32(hw, ALX_IMR, hw->imask);

	if (!link_up && !hw->link_up)
		goto out;

	/* speed encodes duplex in its last digit (see split below) */
	old_speed = hw->link_speed + hw->link_duplex;
	old_link_up = hw->link_up;

	if (link_up) {
		/* same speed ?
 */
		if (old_link_up && old_speed == speed)
			goto out;

		alx_show_speed(adpt, speed);
		hw->link_duplex = speed % 10;
		hw->link_speed = speed - hw->link_duplex;
		hw->link_up = true;
		alx_post_phy_link(hw, hw->link_speed, ALX_CAP(hw, AZ));
		alx_enable_aspm(hw, ALX_CAP(hw, L0S), ALX_CAP(hw, L1));
		alx_start_mac(hw);

		/* link kept, just speed changed */
		if (old_link_up)
			goto out;
		/* link changed from 'down' to 'up' */
		alx_netif_start(adpt);
		goto out;
	}

	/* link changed from 'up' to 'down' */
	alx_netif_stop(adpt);
	hw->link_up = false;
	hw->link_speed = SPEED_0;
	netif_info(adpt, link, adpt->netdev, "NIC Link Down\n");
	err = alx_reset_mac(hw);
	if (err) {
		netif_err(adpt, hw, adpt->netdev,
			  "linkdown:reset_mac fail %d\n", err);
		err = -EIO;
		goto out;
	}
	alx_irq_disable(adpt);

	/* reset-mac cause all settings on HW lost,
	 * following steps restore all of them and
	 * refresh whole RX/TX rings
	 */
	err = alx_reinit_rings(adpt);
	if (err)
		goto out;
	alx_configure(adpt);
	alx_enable_aspm(hw, false, ALX_CAP(hw, L1));
	alx_post_phy_link(hw, SPEED_0, ALX_CAP(hw, AZ));
	alx_irq_enable(adpt);

out:

	if (err) {
		ALX_FLAG_SET(adpt, TASK_RESET);
		alx_schedule_work(adpt);
	}
}

/* alx_open - Called when a network interface is made active */
static int alx_open(struct net_device *netdev)
{
	struct alx_adapter *adpt = netdev_priv(netdev);
	int err;

	/* during diag running, disallow open */
	if (ALX_FLAG(adpt, TESTING))
		return -EBUSY;

	err = __alx_open(adpt, false);

	return err;
}

/* alx_stop - Disables a network interface */
static int alx_stop(struct net_device *netdev)
{
	struct alx_adapter *adpt = netdev_priv(netdev);

	WARN_ON(ALX_FLAG(adpt, RESETING));

	__alx_stop(adpt);

	return 0;
}

/* common suspend/shutdown path: stop the device, drop to the
 * power-saving link speed and arm wake-on-lan.
 * *wol_en reports whether the caller should leave wake enabled.
 */
static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
{
	struct alx_adapter *adpt = pci_get_drvdata(pdev);
	struct net_device *netdev = adpt->netdev;
	struct alx_hw *hw = &adpt->hw;
	int 
err; + u16 speed; + + netif_device_detach(netdev); + if (netif_running(netdev)) + __alx_stop(adpt); + +#ifdef CONFIG_PM_SLEEP + err = pci_save_state(pdev); + if (err) + goto out; +#endif + + err = alx_select_powersaving_speed(hw, &speed); + if (!err) + err = alx_clear_phy_intr(hw); + if (!err) + err = alx_pre_suspend(hw, speed); + if (!err) + err = alx_config_wol(hw); + if (err) + goto out; + + *wol_en = false; + if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) { + netif_info(adpt, wol, netdev, + "wol: ctrl=%X, speed=%X\n", + hw->sleep_ctrl, speed); + + device_set_wakeup_enable(&pdev->dev, true); + *wol_en = true; + } + + pci_disable_device(pdev); + +out: + if (unlikely(err)) { + netif_info(adpt, hw, netdev, + "shutown err(%x)\n", + err); + err = -EIO; + } + + return err; +} + +static void alx_shutdown(struct pci_dev *pdev) +{ + int err; + bool wol_en; + + err = __alx_shutdown(pdev, &wol_en); + if (likely(!err)) { + pci_wake_from_d3(pdev, wol_en); + pci_set_power_state(pdev, PCI_D3hot); + } else { + dev_err(&pdev->dev, "shutdown fail %d\n", err); + } +} + +#ifdef CONFIG_PM_SLEEP +static int alx_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int err; + bool wol_en; + + err = __alx_shutdown(pdev, &wol_en); + if (unlikely(err)) { + dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err); + err = -EIO; + goto out; + } + if (wol_en) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + +out: + return err; +} + +static int alx_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct alx_adapter *adpt = pci_get_drvdata(pdev); + struct net_device *netdev = adpt->netdev; + struct alx_hw *hw = &adpt->hw; + int err; + + if (!netif_running(netdev)) + return 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + hw->link_up = false; + 
hw->link_speed = SPEED_0; + hw->imask = ALX_ISR_MISC; + + alx_reset_pcie(hw); + alx_reset_phy(hw, !hw->hib_patch); + err = alx_reset_mac(hw); + if (err) { + netif_err(adpt, hw, adpt->netdev, + "resume:reset_mac fail %d\n", + err); + return -EIO; + } + err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); + if (err) { + netif_err(adpt, hw, adpt->netdev, + "resume:setup_speed_duplex fail %d\n", + err); + return -EIO; + } + + if (netif_running(netdev)) { + err = __alx_open(adpt, true); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; +} +#endif + + + +/* alx_update_hw_stats - Update the board statistics counters. */ +static void alx_update_hw_stats(struct alx_adapter *adpt) +{ + if (ALX_FLAG(adpt, HALT) || ALX_FLAG(adpt, RESETING)) + return; + + __alx_update_hw_stats(&adpt->hw); +} + +/* alx_get_stats - Get System Network Statistics + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
+ */ +static struct net_device_stats *alx_get_stats(struct net_device *netdev) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + struct alx_hw_stats *hw_stats = &adpt->hw.stats; + + spin_lock(&adpt->smb_lock); + + alx_update_hw_stats(adpt); + + net_stats->tx_packets = hw_stats->tx_ok; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->rx_packets = hw_stats->rx_ok; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_single_col + + hw_stats->tx_multi_col * 2 + + hw_stats->tx_late_col + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + hw_stats->rx_ov_sz + + hw_stats->rx_ov_rrd + hw_stats->rx_align_err; + + net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_over_errors = hw_stats->rx_ov_rrd + hw_stats->rx_ov_rxf; + + net_stats->rx_missed_errors = hw_stats->rx_ov_rrd + hw_stats->rx_ov_rxf; + + net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + + hw_stats->tx_underrun + hw_stats->tx_trunc; + + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + spin_unlock(&adpt->smb_lock); + + return net_stats; +} + +static void alx_update_stats(struct alx_adapter *adpt) +{ + spin_lock(&adpt->smb_lock); + alx_update_hw_stats(adpt); + spin_unlock(&adpt->smb_lock); +} + +void alx_reinit(struct alx_adapter *adpt, bool in_task) +{ + WARN_ON(in_interrupt()); + + while (test_and_set_bit(ALX_FLAG_RESETING, &adpt->flags)) + msleep(20); + + if (ALX_FLAG(adpt, HALT)) + return; + + alx_halt(adpt, in_task); + alx_activate(adpt); + + ALX_FLAG_CLEAR(adpt, RESETING); +} + +/* 
alx_task - manages and runs subtasks */ +static void alx_task(struct work_struct *work) +{ + struct alx_adapter *adpt = container_of(work, struct alx_adapter, task); + + /* don't support reentrance */ + while (test_and_set_bit(ALX_FLAG_TASK_PENDING, &adpt->flags)) + msleep(20); + + if (ALX_FLAG(adpt, HALT)) + goto out; + + if (test_and_clear_bit(ALX_FLAG_TASK_RESET, &adpt->flags)) { + netif_info(adpt, hw, adpt->netdev, + "task:alx_reinit\n"); + alx_reinit(adpt, true); + } + + if (test_and_clear_bit(ALX_FLAG_TASK_UPDATE_SMB, &adpt->flags)) + alx_update_stats(adpt); + + if (test_and_clear_bit(ALX_FLAG_TASK_CHK_LINK, &adpt->flags)) + alx_check_link(adpt); + +out: + ALX_FLAG_CLEAR(adpt, TASK_PENDING); +} + + +static irqreturn_t alx_msix_ring(int irq, void *data) +{ + struct alx_napi *np = data; + struct alx_adapter *adpt = np->adpt; + struct alx_hw *hw = &adpt->hw; + + /* mask interrupt to ACK chip */ + alx_mask_msix(hw, np->vec_idx, true); + /* clear interrutp status */ + ALX_MEM_W32(hw, ALX_ISR, np->vec_mask); + + if (!ALX_FLAG(adpt, HALT)) + napi_schedule(&np->napi); + + return IRQ_HANDLED; +} + +static inline bool alx_handle_intr_misc(struct alx_adapter *adpt, u32 intr) +{ + struct alx_hw *hw = &adpt->hw; + + if (unlikely(intr & ALX_ISR_FATAL)) { + netif_info(adpt, hw, adpt->netdev, + "intr-fatal:%08X\n", intr); + ALX_FLAG_SET(adpt, TASK_RESET); + alx_schedule_work(adpt); + return true; + } + + if (intr & ALX_ISR_ALERT) + netdev_warn(adpt->netdev, "interrutp alert :%x\n", intr); + + if (intr & ALX_ISR_SMB) { + ALX_FLAG_SET(adpt, TASK_UPDATE_SMB); + alx_schedule_work(adpt); + } + + if (intr & ALX_ISR_PHY) { + /* suppress PHY interrupt, because the source + * is from PHY internal. only the internal status + * is cleared, the interrupt status could be cleared. 
+ */ + hw->imask &= ~ALX_ISR_PHY; + ALX_MEM_W32(hw, ALX_IMR, hw->imask); + ALX_FLAG_SET(adpt, TASK_CHK_LINK); + alx_schedule_work(adpt); + } + + return false; +} + +static irqreturn_t alx_intr_msix_misc(int irq, void *data) +{ + struct alx_adapter *adpt = data; + struct alx_hw *hw = &adpt->hw; + u32 intr; + + /* mask interrupt to ACK chip */ + alx_mask_msix(hw, 0, true); + + /* read interrupt status */ + ALX_MEM_R32(hw, ALX_ISR, &intr); + intr &= (hw->imask & ~ALX_ISR_ALL_QUEUES); + + if (alx_handle_intr_misc(adpt, intr)) + return IRQ_HANDLED; + + /* clear interrupt status */ + ALX_MEM_W32(hw, ALX_ISR, intr); + + /* enable interrupt again */ + if (!ALX_FLAG(adpt, HALT)) + alx_mask_msix(hw, 0, false); + + return IRQ_HANDLED; +} + +static inline irqreturn_t alx_intr_1(struct alx_adapter *adpt, u32 intr) +{ + struct alx_hw *hw = &adpt->hw; + + /* ACK interrupt */ + netif_info(adpt, intr, adpt->netdev, + "ACK interrupt: 0x%lx\n", + intr | ALX_ISR_DIS); + ALX_MEM_W32(hw, ALX_ISR, intr | ALX_ISR_DIS); + intr &= hw->imask; + + if (alx_handle_intr_misc(adpt, intr)) + return IRQ_HANDLED; + + if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) { + napi_schedule(&adpt->qnapi[0]->napi); + /* mask rx/tx interrupt, enable them when napi complete */ + hw->imask &= ~ALX_ISR_ALL_QUEUES; + ALX_MEM_W32(hw, ALX_IMR, hw->imask); + } + + ALX_MEM_W32(hw, ALX_ISR, 0); + + return IRQ_HANDLED; +} + + +static irqreturn_t alx_intr_msi(int irq, void *data) +{ + struct alx_adapter *adpt = data; + u32 intr; + + /* read interrupt status */ + ALX_MEM_R32(&adpt->hw, ALX_ISR, &intr); + + return alx_intr_1(adpt, intr); +} + +static irqreturn_t alx_intr_legacy(int irq, void *data) +{ + struct alx_adapter *adpt = data; + struct alx_hw *hw = &adpt->hw; + u32 intr; + + /* read interrupt status */ + ALX_MEM_R32(hw, ALX_ISR, &intr); + if (intr & ALX_ISR_DIS || 0 == (intr & hw->imask)) { + u32 mask; + + ALX_MEM_R32(hw, ALX_IMR, &mask); + netif_info(adpt, intr, adpt->netdev, + "seems a wild interrupt, intr=%X, 
imask=%X, %X\n", + intr, hw->imask, mask); + + return IRQ_NONE; + } + + return alx_intr_1(adpt, intr); +} + + +static int alx_poll(struct napi_struct *napi, int budget) +{ + struct alx_napi *np = container_of(napi, struct alx_napi, napi); + struct alx_adapter *adpt = np->adpt; + bool complete = true; + + netif_info(adpt, intr, adpt->netdev, + "alx_poll, budget(%d)\n", + budget); + + if (np->txq) + complete = alx_clean_tx_irq(np->txq); + if (np->rxq) + complete &= alx_clean_rx_irq(np->rxq, budget); + + if (!complete) + return budget; + + /* rx-packet finished, exit the polling mode */ + napi_complete(&np->napi); + + /* enable interrupt */ + if (!ALX_FLAG(adpt, HALT)) { + struct alx_hw *hw = &adpt->hw; + + if (ALX_FLAG(adpt, USING_MSIX)) + alx_mask_msix(hw, np->vec_idx, false); + else { + /* TODO: need irq spinlock for imask ?? */ + hw->imask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; + ALX_MEM_W32(hw, ALX_IMR, hw->imask); + } + ALX_MEM_FLUSH(hw); + } + + return 0; +} + +static inline struct alx_tx_queue *alx_tx_queue_mapping( + struct alx_adapter *adpt, + struct sk_buff *skb) +{ + int index = skb_get_queue_mapping(skb); + + if (index >= adpt->nr_txq) + index = index % adpt->nr_txq; + + return adpt->qnapi[index]->txq; +} + +static inline int alx_tpd_req(struct sk_buff *skb) +{ + int num; + + num = skb_shinfo(skb)->nr_frags + 1; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + num++; + + return num; +} + +/* get custom checksum offload params + * return val: + * neg-val: drop this skb + * 0: no custom checksum offload + * pos-val: have custom cksum offload + */ +static int alx_tx_csum(struct sk_buff *skb, struct tpd_desc *first) +{ + u8 cso, css; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + cso = skb_checksum_start_offset(skb); + if (cso & 0x1) + return -1; + + css = cso + skb->csum_offset; + first->word1 |= FIELDX(TPD_CXSUMSTART, cso >> 1); + first->word1 |= FIELDX(TPD_CXSUMOFFSET, css >> 1); + first->word1 |= 1 << TPD_CXSUM_EN_SHIFT; + + 
	return 1;
}

/* set up TSO in the first TPD.
 * Returns <0 to drop, 0 when only checksum bits were set (no
 * payload), 1 when LSO was enabled.
 */
static int alx_tso(struct sk_buff *skb, struct tpd_desc *first)
{
	int hdr_len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL ||
	    !skb_is_gso(skb))
		return 0;

	/* headers are rewritten below, so they must be private */
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (unlikely(err))
			return err;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph;

		iph = ip_hdr(skb);
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/* seed the pseudo-header checksum for the HW to finish */
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(
						iph->saddr,
						iph->daddr,
						0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
		first->word1 |= FIELDX(TPD_L4HDROFFSET,
				       skb_transport_offset(skb));
		if (unlikely(skb->len == hdr_len)) {
			/* no tcp payload */
			first->word1 |= 1 << TPD_IP_XSUM_SHIFT;
			first->word1 |= 1 << TPD_TCP_XSUM_SHIFT;
			return 0;
		}
		first->word1 |= 1 << TPD_LSO_EN_SHIFT;
		first->word1 |= FIELDX(TPD_MSS, skb_shinfo(skb)->gso_size);
	} else if (skb_is_gso_v6(skb)) {
		struct ipv6hdr *ip6h;

		ip6h = ipv6_hdr(skb);
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		ip6h->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(
						&ip6h->saddr,
						&ip6h->daddr,
						0, IPPROTO_TCP, 0);
		first->word1 |= FIELDX(TPD_L4HDROFFSET,
				       skb_transport_offset(skb));
		if (unlikely(skb->len == hdr_len)) {
			/* no tcp payload */
			ip6h->payload_len = skb->len -
				((unsigned char *)ip6h - skb->data) -
				sizeof(struct ipv6hdr);
			first->word1 |= 1 << TPD_IP_XSUM_SHIFT;
			first->word1 |= 1 << TPD_TCP_XSUM_SHIFT;
			return 0;
		}
		/* for LSOv2, the 1st TPD just provides packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_EN_SHIFT;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
		first->word1 |= FIELDX(TPD_MSS, skb_shinfo(skb)->gso_size);
	}

	return 1;
}

/* DMA-map the skb head and all fragments into consecutive TPDs.
 * On mapping failure every TPD written so far is unmapped and -1 is
 * returned; txq->pidx is only advanced on success.
 */
static int alx_tx_map(struct alx_tx_queue *txq, struct sk_buff *skb)
{
	struct tpd_desc *tpd, *first_tpd;
	struct alx_buffer *buf, *first_buf;
	dma_addr_t dma;
	u16 producer, maplen, f;

	producer = txq->pidx;

	first_tpd = txq->tpd_hdr + producer;
	first_buf = txq->bf_info + producer;
	tpd = first_tpd;
	buf = first_buf;
	/* LSOv2 consumed the first TPD for the length; data starts in
	 * the next one, which inherits the offload flags
	 */
	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++producer == txq->count)
			producer = 0;
		tpd = txq->tpd_hdr + producer;
		buf = txq->bf_info + producer;
		tpd->word0 = first_tpd->word0;
		tpd->word1 = first_tpd->word1;
	}
	maplen = skb_headlen(skb);
	dma = dma_map_single(txq->dev, skb->data, maplen, DMA_TO_DEVICE);
	if (dma_mapping_error(txq->dev, dma))
		goto err_dma;

	dma_unmap_len_set(buf, size, maplen);
	dma_unmap_addr_set(buf, dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	FIELD_SET32(tpd->word0, TPD_BUFLEN, maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		if (++producer == txq->count)
			producer = 0;
		tpd = txq->tpd_hdr + producer;
		buf = txq->bf_info + producer;
		tpd->word0 = first_tpd->word0;
		tpd->word1 = first_tpd->word1;
		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(txq->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(txq->dev, dma))
			goto err_dma;
		dma_unmap_len_set(buf, size, maplen);
		dma_unmap_addr_set(buf, dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		FIELD_SET32(tpd->word0, TPD_BUFLEN, maplen);
	}
	/* last TPD */
	tpd->word1 |= 1 << TPD_EOP_SHIFT;

	if (++producer == txq->count)
		producer = 0;

	/* skb is only referenced from the last descriptor's buffer */
	first_buf->flags |= ALX_BUF_TX_FIRSTFRAG;
	buf->skb = skb;
	txq->pidx = producer;

	return 0;

err_dma:

	/* unwind: release every descriptor written in this call */
	for (f = txq->pidx; f != producer;) {
		alx_txbuf_unmap_and_free(txq, f);
		if (++f == txq->count)
			f = 0;
	}
	return -1;
}

/* enqueue one skb on @txq and kick the hardware producer index */
static netdev_tx_t alx_start_xmit_ring(struct alx_tx_queue *txq,
				       struct sk_buff *skb)
{
	struct alx_adapter *adpt;
	struct netdev_queue *netque;
	struct tpd_desc *first;
	int budget, tpdreq;
	int do_tso;

	adpt = netdev_priv(txq->netdev);
	netque = netdev_get_tx_queue(txq->netdev, skb_get_queue_mapping(skb));

	tpdreq = alx_tpd_req(skb);
	budget = alx_tpd_avail(txq);

	if (unlikely(budget < tpdreq)) {
		if (!netif_tx_queue_stopped(netque)) {
			netif_tx_stop_queue(netque);

			/* TX reclaim might have freed plenty of TPDs
			 * between our check and the stop (its check
			 * doesn't take the tx lock), which would leave
			 * the queue stopped and never woken.
			 * try one more time
			 */
			budget = alx_tpd_avail(txq);
			if (budget >= tpdreq) {
				netif_tx_wake_queue(netque);
				goto tx_conti;
			}
			netif_err(adpt, tx_err, adpt->netdev,
				  "TPD Ring is full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

tx_conti:

	first = txq->tpd_hdr + txq->pidx;
	memset(first, 0, sizeof(struct tpd_desc));

	/* NOTE, chip only supports single-VLAN insertion (81-00-TAG) */
	if (vlan_tx_tag_present(skb)) {
		first->word1 |= 1 << TPD_INS_VLTAG_SHIFT;
		first->word0 |= FIELDX(TPD_VLTAG, htons(vlan_tx_tag_get(skb)));
	}
	if (skb->protocol == htons(ETH_P_8021Q))
		first->word1 |= 1 << TPD_VLTAGGED_SHIFT;
	if (skb_network_offset(skb) != ETH_HLEN)
		first->word1 |= 1 << TPD_ETHTYPE_SHIFT;

	do_tso = alx_tso(skb, first);
	if (do_tso < 0)
		goto drop;
	else if (!do_tso && alx_tx_csum(skb, first) < 0)
		goto drop;

	if (alx_tx_map(txq, skb) < 0)
		goto drop;

	netdev_tx_sent_queue(netque, skb->len);

	/* refresh produce idx on HW */
	wmb();
	ALX_MEM_W16(&adpt->hw, txq->p_reg, txq->pidx);

	netif_info(adpt, tx_done, adpt->netdev,
		   "TX[Preg:%X]: producer = 0x%x, consumer = 0x%x\n",
		   txq->p_reg, txq->pidx, atomic_read(&txq->cidx));

	return NETDEV_TX_OK;

drop:
	netif_info(adpt, tx_done, adpt->netdev,
		   "tx-skb(%d) dropped\n",
		   skb->len);
	memset(first, 0, sizeof(struct tpd_desc));
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_adapter *adpt = netdev_priv(netdev);

	/* device halted: silently consume the packet */
	if (ALX_FLAG(adpt, HALT)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
+ } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + return alx_start_xmit_ring(alx_tx_queue_mapping(adpt, skb), skb); +} + + +static void alx_dump_state(struct alx_adapter *adpt) +{ + struct alx_hw *hw = &adpt->hw; + struct alx_tx_queue *txq; + struct tpd_desc *tpd; + u16 begin, end; + int i; + + for (i = 0; i < adpt->nr_txq; i++) { + + txq = adpt->qnapi[i]->txq; + begin = txq->pidx >= 8 ? (txq->pidx - 8) : + (txq->count + txq->pidx - 8); + end = txq->pidx + 4; + if (end >= txq->count) + end -= txq->count; + + netif_err(adpt, tx_err, adpt->netdev, + "-----------------TPD-ring(%d)------------------\n", + i); + + while (begin != end) { + tpd = txq->tpd_hdr + begin; + netif_err(adpt, tx_err, adpt->netdev, + "%X: W0=%08X, W1=%08X, W2=%X\n", + begin, tpd->word0, tpd->word1, + tpd->adrl.l.pkt_len); + if (++begin >= txq->count) + begin = 0; + } + } + + netif_err(adpt, tx_err, adpt->netdev, + "---------------dump registers-----------------\n"); + end = 0x1800; + for (begin = 0x1400; begin < end; begin += 16) { + u32 v1, v2, v3, v4; + + ALX_MEM_R32(hw, begin, &v1); + ALX_MEM_R32(hw, begin+4, &v2); + ALX_MEM_R32(hw, begin+8, &v3); + ALX_MEM_R32(hw, begin+12, &v4); + netif_err(adpt, tx_err, adpt->netdev, + "%04X: %08X,%08X,%08X,%08X\n", + begin, v1, v2, v3, v4); + } +} + +static void alx_tx_timeout(struct net_device *dev) +{ + struct alx_adapter *adpt = netdev_priv(dev); + + alx_dump_state(adpt); + + ALX_FLAG_SET(adpt, TASK_RESET); + alx_schedule_work(adpt); +} + +static int alx_mdio_read(struct net_device *netdev, + int prtad, int devad, u16 addr) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + u16 val; + int err; + + netif_dbg(adpt, hw, netdev, + "alx_mdio_read, prtad=%d, devad=%d, addr=%X\n", + prtad, devad, addr); + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad != MDIO_DEVAD_NONE) + err = alx_read_phy_ext(hw, devad, addr, &val); + else + err = alx_read_phy_reg(hw, addr, &val); + + 
return err ? -EIO : val; +} + +static int alx_mdio_write(struct net_device *netdev, + int prtad, int devad, u16 addr, u16 val) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + struct alx_hw *hw = &adpt->hw; + int err; + + netif_dbg(adpt, hw, netdev, + "alx_mdio_write: prtad=%d, devad=%d, addr=%X, val=%X\n", + prtad, devad, addr, val); + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad != MDIO_DEVAD_NONE) + err = alx_write_phy_ext(hw, devad, addr, val); + else + err = alx_write_phy_reg(hw, addr, val); + + return err ? -EIO : 0; +} + +static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + return mdio_mii_ioctl(&adpt->hw.mdio, if_mii(ifr), cmd); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void alx_poll_controller(struct net_device *netdev) +{ + struct alx_adapter *adpt = netdev_priv(netdev); + int i; + + if (ALX_FLAG(adpt, HALT)) + return; + + if (ALX_FLAG(adpt, USING_MSIX)) { + alx_intr_msix_misc(0, adpt); + for (i = 0; i < adpt->nr_napi; i++) + alx_msix_ring(0, adpt->qnapi[i]); + } else if (ALX_FLAG(adpt, USING_MSI)) + alx_intr_msi(0, adpt); + else + alx_intr_legacy(0, adpt); + +} +#endif + +static const struct net_device_ops alx_netdev_ops = { + .ndo_open = alx_open, + .ndo_stop = alx_stop, + .ndo_start_xmit = alx_start_xmit, + .ndo_get_stats = alx_get_stats, + .ndo_set_rx_mode = alx_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = alx_set_mac_address, + .ndo_change_mtu = alx_change_mtu, + .ndo_do_ioctl = alx_ioctl, + .ndo_tx_timeout = alx_tx_timeout, + .ndo_fix_features = alx_fix_features, + .ndo_set_features = alx_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = alx_poll_controller, +#endif +}; + +/* alx_probe - Device Initialization Routine */ +static int +alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + 
struct alx_adapter *adpt = NULL; + struct alx_hw *hw; + bool phy_cfged; + int bars, pm_cap, err; + static int cards_found; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device memory\n"); + return err; + } + + /* The alx chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + */ + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA config, aborting\n"); + goto err_dma_mask; + } + } + } + + /* obtain PCI resources */ + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions(pdev, bars, alx_drv_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed(bars:%d)\n", bars); + goto err_pci_region; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + /* find PM capability */ + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap == 0) { + dev_err(&pdev->dev, + "Can't find power management capability, aborting\n"); + err = -EIO; + goto err_pm; + } + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + dev_err(&pdev->dev, "switch to D0 status failed, aborting\n"); + goto err_pm; + } + + /* netdev zeroed in init_etherdev */ + netdev = alloc_etherdev_mqs(sizeof(struct alx_adapter), + ALX_MAX_TX_QUEUES, + ALX_MAX_RX_QUEUES); + if (!netdev) { + dev_err(&pdev->dev, "etherdev_mq alloc failed\n"); + err = -ENOMEM; + goto err_alloc_ethdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + adpt = netdev_priv(netdev); + adpt->netdev = netdev; + adpt->pdev = pdev; + 
adpt->msg_enable = NETIF_MSG_LINK | + NETIF_MSG_HW | + NETIF_MSG_IFUP | + NETIF_MSG_TX_ERR | + NETIF_MSG_RX_ERR | + NETIF_MSG_WOL; + adpt->bd_number = cards_found; + hw = &adpt->hw; + hw->pdev = pdev; + pci_set_drvdata(pdev, adpt); + + hw->hw_addr = pci_ioremap_bar(pdev, 0); + if (!hw->hw_addr) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -EIO; + goto err_iomap; + } + + netdev->netdev_ops = &alx_netdev_ops; + alx_set_ethtool_ops(netdev); + netdev->irq = pdev->irq; + netdev->watchdog_timeo = ALX_WATCHDOG_TIME; + + /* init alx_adapte structure */ + err = alx_init_sw(adpt); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto err_init_sw; + } + + /* reset pcie */ + alx_reset_pcie(hw); + + /* check if phy already configed by ohter driver */ + phy_cfged = alx_phy_configed(hw); + + /* reset PHY to a known stable status */ + if (!phy_cfged) + alx_reset_phy(hw, !hw->hib_patch); + else + dev_info(&pdev->dev, "PHY has been configured.\n"); + + /* reset mac/dma controller */ + err = alx_reset_mac(hw); + if (err) { + dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); + err = -EIO; + goto err_rst_mac; + } + + /* setup link to put it in a known good starting state */ + if (!phy_cfged) { + err = alx_setup_speed_duplex(hw, + hw->adv_cfg, hw->flowctrl); + if (err) { + dev_err(&pdev->dev, + "config PHY speed/duplex failed,err=%d\n", + err); + err = -EIO; + goto err_setup_link; + } + } + + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_RX | + NETIF_F_TSO | + NETIF_F_TSO6; + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX; + + /* read permanent mac addr from register or eFuse */ + if (alx_get_perm_macaddr(hw, hw->perm_addr)) { + dev_warn(&pdev->dev, "invalid perm-address, use random one\n"); + eth_hw_addr_random(netdev); + memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); + } + /* using permanent address as current address */ + memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); + 
memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); + memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); + + /* PHY mdio */ + hw->mdio.prtad = 0; + hw->mdio.mmds = 0; + hw->mdio.dev = netdev; + hw->mdio.mode_support = + MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22 | MDIO_EMULATE_C22; + hw->mdio.mdio_read = alx_mdio_read; + hw->mdio.mdio_write = alx_mdio_write; + if (!alx_get_phy_info(hw)) { + dev_err(&pdev->dev, "identify PHY failed\n"); + err = -EIO; + goto err_id_phy; + } + + INIT_WORK(&adpt->task, alx_task); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto err_register_netdev; + } + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl); + cards_found++; + + dev_info(&pdev->dev, + "alx(%pM): Qualcomm Atheros Ethernet Network Connection\n", + netdev->dev_addr); + + return 0; + +err_id_phy: +err_register_netdev: +err_setup_link: +err_rst_mac: +err_init_sw: + iounmap(hw->hw_addr); +err_iomap: + free_netdev(netdev); +err_alloc_ethdev: +err_pm: + pci_release_selected_regions(pdev, bars); +err_pci_region: +err_dma_mask: + pci_disable_device(pdev); + dev_err(&pdev->dev, "error when probe device, error = %d\n", err); + return err; +} + +/* alx_remove - Device Removal Routine */ +static void alx_remove(struct pci_dev *pdev) +{ + struct alx_adapter *adpt = pci_get_drvdata(pdev); + struct alx_hw *hw = &adpt->hw; + struct net_device *netdev; + + if (!adpt) + return; + + netdev = adpt->netdev; + + ALX_FLAG_SET(adpt, HALT); + alx_cancel_work(adpt); + + /* restore permanent mac address */ + alx_set_macaddr(hw, hw->perm_addr); + + unregister_netdev(netdev); + iounmap(hw->hw_addr); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + free_netdev(netdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +/* alx_pci_error_detected */ +static 
pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct alx_adapter *adpt = pci_get_drvdata(pdev); + struct net_device *netdev = adpt->netdev; + pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET; + + dev_info(&pdev->dev, "pci error detected\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + netif_device_detach(netdev); + alx_halt(adpt, false); + } + if (state == pci_channel_io_perm_failure) + rc = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return rc; +} + +/* alx_pci_error_slot_reset */ +static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) +{ + struct alx_adapter *adpt = pci_get_drvdata(pdev); + struct alx_hw *hw = &adpt->hw; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + dev_info(&pdev->dev, "pci error slot reset\n"); + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Re-enable PCI device after reset fail\n"); + goto out; + } + + pci_set_master(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + alx_reset_pcie(hw); + if (!alx_reset_mac(hw)) + rc = PCI_ERS_RESULT_RECOVERED; +out: + pci_cleanup_aer_uncorrect_error_status(pdev); + + rtnl_unlock(); + + return rc; +} + +/* alx_pci_error_resume */ +static void alx_pci_error_resume(struct pci_dev *pdev) +{ + struct alx_adapter *adpt = pci_get_drvdata(pdev); + struct net_device *netdev = adpt->netdev; + + dev_info(&pdev->dev, "pci error resume\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + alx_activate(adpt); + netif_device_attach(netdev); + } + + rtnl_unlock(); +} + + +static struct pci_error_handlers alx_err_handler = { + .error_detected = alx_pci_error_detected, + .slot_reset = alx_pci_error_slot_reset, + .resume = alx_pci_error_resume, +}; + +#ifdef CONFIG_PM_SLEEP +static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +#define ALX_PM_OPS (&alx_pm_ops) +#else +#define ALX_PM_OPS NULL +#endif + +static struct pci_driver 
alx_driver = { + .name = alx_drv_name, + .id_table = alx_pci_tbl, + .probe = alx_probe, + .remove = alx_remove, + .shutdown = alx_shutdown, + .err_handler = &alx_err_handler, + .driver.pm = ALX_PM_OPS, +}; + + +static int __init alx_init_module(void) +{ + pr_info("%s\n", alx_drv_desc); + return pci_register_driver(&alx_driver); +} +module_init(alx_init_module); + +static void __exit alx_exit_module(void) +{ + pci_unregister_driver(&alx_driver); +} +module_exit(alx_exit_module); diff -Nurp linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_reg.h linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_reg.h --- linux-3.8.11/drivers/net/ethernet/atheros/alx/alx_reg.h 1970-01-01 02:00:00.000000000 +0200 +++ linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/alx_reg.h 2013-05-05 13:51:31.000000000 +0300 @@ -0,0 +1,2295 @@ +/* + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef ALX_REG_H +#define ALX_REG_H + +/**********************************************************************/ +/* following registers are mapped to both pci config and memory space */ +/**********************************************************************/ + +#define ALX_VENDOR_ID PCI_VENDOR_ID_ATTANSIC + +/* pci dev-ids */ +#define ALX_DEV_ID_AR8161 0x1091 +#define ALX_DEV_ID_AR8162 0x1090 +#define ALX_DEV_ID_AR8171 0x10A1 +#define ALX_DEV_ID_AR8172 0x10A0 + +/* rev definition, + * bit(0): with xD support + * bit(1): with Card Reader function + * bit(7:2): real revision + */ +#define ALX_PCI_REVID_WTH_CR BIT(1) +#define ALX_PCI_REVID_WTH_XD BIT(0) +#define ALX_PCI_REVID_MASK 0x1FU +#define ALX_PCI_REVID_SHIFT 3 +#define ALX_REV_A0 0 +#define ALX_REV_A1 1 +#define ALX_REV_B0 2 +#define ALX_REV_C0 3 + +#define ALX_PM_CSR 0x0044 +#define ALX_PM_CSR_PME_STAT BIT(15) +#define ALX_PM_CSR_DSCAL_MASK 0x3U +#define ALX_PM_CSR_DSCAL_SHIFT 13 +#define ALX_PM_CSR_DSEL_MASK 0xFU +#define ALX_PM_CSR_DSEL_SHIFT 9 +#define ALX_PM_CSR_PME_EN BIT(8) +#define ALX_PM_CSR_PWST_MASK 0x3U +#define ALX_PM_CSR_PWST_SHIFT 0 + +#define ALX_DEV_CAP 0x005C +#define ALX_DEV_CAP_SPLSL_MASK 0x3UL +#define ALX_DEV_CAP_SPLSL_SHIFT 26 +#define ALX_DEV_CAP_SPLV_MASK 0xFFUL +#define ALX_DEV_CAP_SPLV_SHIFT 18 +#define ALX_DEV_CAP_RBER BIT(15) +#define ALX_DEV_CAP_PIPRS BIT(14) +#define ALX_DEV_CAP_AIPRS BIT(13) +#define ALX_DEV_CAP_ABPRS BIT(12) +#define ALX_DEV_CAP_L1ACLAT_MASK 0x7UL +#define ALX_DEV_CAP_L1ACLAT_SHIFT 9 +#define ALX_DEV_CAP_L0SACLAT_MASK 0x7UL +#define ALX_DEV_CAP_L0SACLAT_SHIFT 6 +#define ALX_DEV_CAP_EXTAG BIT(5) +#define ALX_DEV_CAP_PHANTOM BIT(4) +#define ALX_DEV_CAP_MPL_MASK 0x7UL +#define ALX_DEV_CAP_MPL_SHIFT 0 +#define ALX_DEV_CAP_MPL_128 1 +#define ALX_DEV_CAP_MPL_256 2 +#define ALX_DEV_CAP_MPL_512 3 +#define ALX_DEV_CAP_MPL_1024 4 +#define ALX_DEV_CAP_MPL_2048 5 +#define ALX_DEV_CAP_MPL_4096 6 + +#define ALX_DEV_CTRL 0x0060 +#define ALX_DEV_CTRL_MAXRRS_MASK 0x7U 
+#define ALX_DEV_CTRL_MAXRRS_SHIFT 12 +#define ALX_DEV_CTRL_MAXRRS_MIN 2 +#define ALX_DEV_CTRL_NOSNP_EN BIT(11) +#define ALX_DEV_CTRL_AUXPWR_EN BIT(10) +#define ALX_DEV_CTRL_PHANTOM_EN BIT(9) +#define ALX_DEV_CTRL_EXTAG_EN BIT(8) +#define ALX_DEV_CTRL_MPL_MASK 0x7U +#define ALX_DEV_CTRL_MPL_SHIFT 5 +#define ALX_DEV_CTRL_RELORD_EN BIT(4) +#define ALX_DEV_CTRL_URR_EN BIT(3) +#define ALX_DEV_CTRL_FERR_EN BIT(2) +#define ALX_DEV_CTRL_NFERR_EN BIT(1) +#define ALX_DEV_CTRL_CERR_EN BIT(0) + +#define ALX_DEV_STAT 0x0062 +#define ALX_DEV_STAT_XS_PEND BIT(5) +#define ALX_DEV_STAT_AUXPWR BIT(4) +#define ALX_DEV_STAT_UR BIT(3) +#define ALX_DEV_STAT_FERR BIT(2) +#define ALX_DEV_STAT_NFERR BIT(1) +#define ALX_DEV_STAT_CERR BIT(0) + +#define ALX_LNK_CAP 0x0064 +#define ALX_LNK_CAP_PRTNUM_MASK 0xFFUL +#define ALX_LNK_CAP_PRTNUM_SHIFT 24 +#define ALX_LNK_CAP_CLK_PM BIT(18) +#define ALX_LNK_CAP_L1EXTLAT_MASK 0x7UL +#define ALX_LNK_CAP_L1EXTLAT_SHIFT 15 +#define ALX_LNK_CAP_L0SEXTLAT_MASK 0x7UL +#define ALX_LNK_CAP_L0SEXTLAT_SHIFT 12 +#define ALX_LNK_CAP_ASPM_SUP_MASK 0x3UL +#define ALX_LNK_CAP_ASPM_SUP_SHIFT 10 +#define ALX_LNK_CAP_ASPM_SUP_L0S 1 +#define ALX_LNK_CAP_ASPM_SUP_L0SL1 3 +#define ALX_LNK_CAP_MAX_LWH_MASK 0x3FUL +#define ALX_LNK_CAP_MAX_LWH_SHIFT 4 +#define ALX_LNK_CAP_MAX_LSPD_MASK 0xFUL +#define ALX_LNK_CAP_MAX_LSPD_SHIFT 0 + +#define ALX_LNK_CTRL 0x0068 +#define ALX_LNK_CTRL_CLK_PM_EN BIT(8) +#define ALX_LNK_CTRL_EXTSYNC BIT(7) +#define ALX_LNK_CTRL_CMNCLK_CFG BIT(6) +#define ALX_LNK_CTRL_RCB_128B BIT(3) +#define ALX_LNK_CTRL_ASPM_MASK 0x3U +#define ALX_LNK_CTRL_ASPM_SHIFT 0 +#define ALX_LNK_CTRL_ASPM_DIS 0 +#define ALX_LNK_CTRL_ASPM_ENL0S 1 +#define ALX_LNK_CTRL_ASPM_ENL1 2 +#define ALX_LNK_CTRL_ASPM_ENL0SL1 3 + +#define ALX_LNK_STAT 0x006A +#define ALX_LNK_STAT_SCLKCFG BIT(12) +#define ALX_LNK_STAT_LNKTRAIN BIT(11) +#define ALX_LNK_STAT_TRNERR BIT(10) +#define ALX_LNK_STAT_LNKSPD_MASK 0xFU +#define ALX_LNK_STAT_LNKSPD_SHIFT 0 +#define ALX_LNK_STAT_NEGLW_MASK 0x3FU 
+#define ALX_LNK_STAT_NEGLW_SHIFT 4 + +#define ALX_MSIX_MASK 0x0090 +#define ALX_MSIX_PENDING 0x0094 + +#define ALX_UE_SVRT 0x010C +#define ALX_UE_SVRT_UR BIT(20) +#define ALX_UE_SVRT_ECRCERR BIT(19) +#define ALX_UE_SVRT_MTLP BIT(18) +#define ALX_UE_SVRT_RCVOVFL BIT(17) +#define ALX_UE_SVRT_UNEXPCPL BIT(16) +#define ALX_UE_SVRT_CPLABRT BIT(15) +#define ALX_UE_SVRT_CPLTO BIT(14) +#define ALX_UE_SVRT_FCPROTERR BIT(13) +#define ALX_UE_SVRT_PTLP BIT(12) +#define ALX_UE_SVRT_DLPROTERR BIT(4) +#define ALX_UE_SVRT_TRNERR BIT(0) + +/* eeprom & flash load register */ +#define ALX_EFLD 0x0204 +#define ALX_EFLD_F_ENDADDR_MASK 0x3FFUL +#define ALX_EFLD_F_ENDADDR_SHIFT 16 +#define ALX_EFLD_F_EXIST BIT(10) +#define ALX_EFLD_E_EXIST BIT(9) +#define ALX_EFLD_EXIST BIT(8) +#define ALX_EFLD_STAT BIT(5) +#define ALX_EFLD_IDLE BIT(4) +#define ALX_EFLD_START BIT(0) + +/* eFuse load register */ +#define ALX_SLD 0x0218 +#define ALX_SLD_FREQ_MASK 0x3UL +#define ALX_SLD_FREQ_SHIFT 24 +#define ALX_SLD_FREQ_100K 0 +#define ALX_SLD_FREQ_200K 1 +#define ALX_SLD_FREQ_300K 2 +#define ALX_SLD_FREQ_400K 3 +#define ALX_SLD_EXIST BIT(23) +#define ALX_SLD_SLVADDR_MASK 0x7FUL +#define ALX_SLD_SLVADDR_SHIFT 16 +#define ALX_SLD_IDLE BIT(13) +#define ALX_SLD_STAT BIT(12) +#define ALX_SLD_START BIT(11) +#define ALX_SLD_STARTADDR_MASK 0xFFUL +#define ALX_SLD_STARTADDR_SHIFT 0 +#define ALX_SLD_MAX_TO 100 + +#define ALX_PCIE_MSIC 0x021C +#define ALX_PCIE_MSIC_MSIX_DIS BIT(22) +#define ALX_PCIE_MSIC_MSI_DIS BIT(21) + +#define ALX_PPHY_MISC1 0x1000 +#define ALX_PPHY_MISC1_RCVDET BIT(2) +#define ALX_PPHY_MISC1_NFTS_MASK 0xFFUL +#define ALX_PPHY_MISC1_NFTS_SHIFT 16 +#define ALX_PPHY_MISC1_NFTS_HIPERF 0xA0 + +#define ALX_PPHY_MISC2 0x1004 +#define ALX_PPHY_MISC2_L0S_TH_MASK 0x3UL +#define ALX_PPHY_MISC2_L0S_TH_SHIFT 18 +#define ALX_PPHY_MISC2_CDR_BW_MASK 0x3UL +#define ALX_PPHY_MISC2_CDR_BW_SHIFT 16 + +#define ALX_PDLL_TRNS1 0x1104 +#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11) +#define 
ALX_PDLL_TRNS1_REGCLK_SEL_NORM BIT(10) +#define ALX_PDLL_TRNS1_REPLY_TO_MASK 0x3FFUL +#define ALX_PDLL_TRNS1_REPLY_TO_SHIFT 0 + +#define ALX_TLEXTN_STATS 0x1208 +#define ALX_TLEXTN_STATS_DEVNO_MASK 0x1FUL +#define ALX_TLEXTN_STATS_DEVNO_SHIFT 16 +#define ALX_TLEXTN_STATS_BUSNO_MASK 0xFFUL +#define ALX_TLEXTN_STATS_BUSNO_SHIFT 8 + +#define ALX_EFUSE_CTRL 0x12C0 +#define ALX_EFUSE_CTRL_FLAG BIT(31) +#define ALX_EUFSE_CTRL_ACK BIT(30) +#define ALX_EFUSE_CTRL_ADDR_MASK 0x3FFUL +#define ALX_EFUSE_CTRL_ADDR_SHIFT 16 + +#define ALX_EFUSE_DATA 0x12C4 + +#define ALX_SPI_OP1 0x12C8 +#define ALX_SPI_OP1_RDID_MASK 0xFFUL +#define ALX_SPI_OP1_RDID_SHIFT 24 +#define ALX_SPI_OP1_CE_MASK 0xFFUL +#define ALX_SPI_OP1_CE_SHIFT 16 +#define ALX_SPI_OP1_SE_MASK 0xFFUL +#define ALX_SPI_OP1_SE_SHIFT 8 +#define ALX_SPI_OP1_PRGRM_MASK 0xFFUL +#define ALX_SPI_OP1_PRGRM_SHIFT 0 + +#define ALX_SPI_OP2 0x12CC +#define ALX_SPI_OP2_READ_MASK 0xFFUL +#define ALX_SPI_OP2_READ_SHIFT 24 +#define ALX_SPI_OP2_WRSR_MASK 0xFFUL +#define ALX_SPI_OP2_WRSR_SHIFT 16 +#define ALX_SPI_OP2_RDSR_MASK 0xFFUL +#define ALX_SPI_OP2_RDSR_SHIFT 8 +#define ALX_SPI_OP2_WREN_MASK 0xFFUL +#define ALX_SPI_OP2_WREN_SHIFT 0 + +#define ALX_SPI_OP3 0x12E4 +#define ALX_SPI_OP3_WRDI_MASK 0xFFUL +#define ALX_SPI_OP3_WRDI_SHIFT 8 +#define ALX_SPI_OP3_EWSR_MASK 0xFFUL +#define ALX_SPI_OP3_EWSR_SHIFT 0 + +#define ALX_EF_CTRL 0x12D0 +#define ALX_EF_CTRL_FSTS_MASK 0xFFUL +#define ALX_EF_CTRL_FSTS_SHIFT 20 +#define ALX_EF_CTRL_CLASS_MASK 0x7UL +#define ALX_EF_CTRL_CLASS_SHIFT 16 +#define ALX_EF_CTRL_CLASS_F_UNKNOWN 0 +#define ALX_EF_CTRL_CLASS_F_STD 1 +#define ALX_EF_CTRL_CLASS_F_SST 2 +#define ALX_EF_CTRL_CLASS_E_UNKNOWN 0 +#define ALX_EF_CTRL_CLASS_E_1K 1 +#define ALX_EF_CTRL_CLASS_E_4K 2 +#define ALX_EF_CTRL_FRET BIT(15) +#define ALX_EF_CTRL_TYP_MASK 0x3UL +#define ALX_EF_CTRL_TYP_SHIFT 12 +#define ALX_EF_CTRL_TYP_NONE 0 +#define ALX_EF_CTRL_TYP_F 1 +#define ALX_EF_CTRL_TYP_E 2 +#define ALX_EF_CTRL_TYP_UNKNOWN 3 +#define 
ALX_EF_CTRL_ONE_CLK BIT(10) +#define ALX_EF_CTRL_ECLK_MASK 0x3UL +#define ALX_EF_CTRL_ECLK_SHIFT 8 +#define ALX_EF_CTRL_ECLK_125K 0 +#define ALX_EF_CTRL_ECLK_250K 1 +#define ALX_EF_CTRL_ECLK_500K 2 +#define ALX_EF_CTRL_ECLK_1M 3 +#define ALX_EF_CTRL_FBUSY BIT(7) +#define ALX_EF_CTRL_ACTION BIT(6) +#define ALX_EF_CTRL_AUTO_OP BIT(5) +#define ALX_EF_CTRL_SST_MODE BIT(4) +#define ALX_EF_CTRL_INST_MASK 0xFUL +#define ALX_EF_CTRL_INST_SHIFT 0 +#define ALX_EF_CTRL_INST_NONE 0 +#define ALX_EF_CTRL_INST_READ 1 +#define ALX_EF_CTRL_INST_RDID 2 +#define ALX_EF_CTRL_INST_RDSR 3 +#define ALX_EF_CTRL_INST_WREN 4 +#define ALX_EF_CTRL_INST_PRGRM 5 +#define ALX_EF_CTRL_INST_SE 6 +#define ALX_EF_CTRL_INST_CE 7 +#define ALX_EF_CTRL_INST_WRSR 10 +#define ALX_EF_CTRL_INST_EWSR 11 +#define ALX_EF_CTRL_INST_WRDI 12 +#define ALX_EF_CTRL_INST_WRITE 2 + +#define ALX_EF_ADDR 0x12D4 +#define ALX_EF_DATA 0x12D8 +#define ALX_SPI_ID 0x12DC + +#define ALX_SPI_CFG_START 0x12E0 + +#define ALX_PMCTRL 0x12F8 +#define ALX_PMCTRL_HOTRST_WTEN BIT(31) +/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */ +#define ALX_PMCTRL_ASPM_FCEN BIT(30) +#define ALX_PMCTRL_SADLY_EN BIT(29) +#define ALX_PMCTRL_L0S_BUFSRX_EN BIT(28) +#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xFUL +#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24 +#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC +/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */ +#define ALX_PMCTRL_L1REQ_TO_MASK 0xFUL +#define ALX_PMCTRL_L1REQ_TO_SHIFT 20 +#define ALX_PMCTRL_L1REG_TO_DEF 0xF +#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19) +#define ALX_PMCTRL_L1_TIMER_MASK 0x7UL +#define ALX_PMCTRL_L1_TIMER_SHIFT 16 +#define ALX_PMCTRL_L1_TIMER_DIS 0 +#define ALX_PMCTRL_L1_TIMER_2US 1 +#define ALX_PMCTRL_L1_TIMER_4US 2 +#define ALX_PMCTRL_L1_TIMER_8US 3 +#define ALX_PMCTRL_L1_TIMER_16US 4 +#define ALX_PMCTRL_L1_TIMER_24US 5 +#define ALX_PMCTRL_L1_TIMER_32US 6 +#define ALX_PMCTRL_L1_TIMER_63US 7 +#define ALX_PMCTRL_RCVR_WT_1US BIT(15) +#define 
ALX_PMCTRL_PWM_VER_11 BIT(14)
+/* bit13: enable pcie clk switch in L1 state */
+#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
+#define ALX_PMCTRL_L0S_EN BIT(12)
+#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
+#define ALX_PMCTRL_L0S_TIMER_MASK 0x7UL
+#define ALX_PMCTRL_L0S_TIMER_SHIFT 8
+#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
+/* bit6: power down serdes RX */
+#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
+#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
+#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
+#define ALX_PMCTRL_L1_EN BIT(3)
+#define ALX_PMCTRL_CLKREQ_EN BIT(2)
+#define ALX_PMCTRL_RBER_EN BIT(1)
+#define ALX_PMCTRL_SPRSDWER_EN BIT(0)
+
+#define ALX_LTSSM_CTRL 0x12FC
+#define ALX_LTSSM_WRO_EN BIT(12)
+
+/*******************************************************/
+/* following registers are mapped only to memory space */
+/*******************************************************/
+
+#define ALX_MASTER 0x1400
+#define ALX_MASTER_OTP_FLG BIT(31)
+#define ALX_MASTER_DEV_NUM_MASK 0x7FUL
+#define ALX_MASTER_DEV_NUM_SHIFT 24
+#define ALX_MASTER_REV_NUM_MASK 0xFFUL
+#define ALX_MASTER_REV_NUM_SHIFT 16
+#define ALX_MASTER_DEASSRT BIT(15)
+#define ALX_MASTER_RDCLR_INT BIT(14)
+#define ALX_MASTER_DMA_RST BIT(13)
+/* bit12: 1:always select pclk from serdes, not sw to 25M */
+#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
+/* bit11: irq modulation for rx */
+#define ALX_MASTER_IRQMOD2_EN BIT(11)
+/* bit10: irq modulation for tx/rx */
+#define ALX_MASTER_IRQMOD1_EN BIT(10)
+#define ALX_MASTER_MANU_INT BIT(9)
+#define ALX_MASTER_MANUTIMER_EN BIT(8)
+#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
+#define ALX_MASTER_OOB_DIS BIT(6)
+/* bit5: wakeup without pcie clk */
+#define ALX_MASTER_WAKEN_25M BIT(5)
+#define ALX_MASTER_BERT_START BIT(4)
+#define ALX_MASTER_PCIE_TSTMOD_MASK 0x3UL
+#define ALX_MASTER_PCIE_TSTMOD_SHIFT 2
+#define ALX_MASTER_PCIE_RST BIT(1)
+/* bit0: MAC & DMA reset */
+#define ALX_MASTER_DMA_MAC_RST BIT(0)
+#define ALX_DMA_MAC_RST_TO 50
+
+#define ALX_MANU_TIMER 0x1404
+
+#define ALX_IRQ_MODU_TIMER 0x1408
+/* hi-16bit is only for RX */
+#define ALX_IRQ_MODU_TIMER2_MASK 0xFFFFUL
+#define ALX_IRQ_MODU_TIMER2_SHIFT 16
+#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFFUL
+#define ALX_IRQ_MODU_TIMER1_SHIFT 0
+
+#define ALX_PHY_CTRL 0x140C
+#define ALX_PHY_CTRL_ADDR_MASK 0x1FUL
+#define ALX_PHY_CTRL_ADDR_SHIFT 19
+#define ALX_PHY_CTRL_BP_VLTGSW BIT(18)
+#define ALX_PHY_CTRL_100AB_EN BIT(17)
+#define ALX_PHY_CTRL_10AB_EN BIT(16)
+#define ALX_PHY_CTRL_PLL_BYPASS BIT(15)
+/* bit14: affect MAC & PHY, go to low power sts */
+#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
+/* bit13: 1:pll always ON, 0:can switch in lpw */
+#define ALX_PHY_CTRL_PLL_ON BIT(13)
+#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
+#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
+#define ALX_PHY_CTRL_HIB_EN BIT(10)
+#define ALX_PHY_CTRL_GIGA_DIS BIT(9)
+/* bit8: poweron rst */
+#define ALX_PHY_CTRL_IDDQ_DIS BIT(8)
+/* bit7: while reboot, it affects bit8 */
+#define ALX_PHY_CTRL_IDDQ BIT(7)
+#define ALX_PHY_CTRL_LPW_EXIT BIT(6)
+#define ALX_PHY_CTRL_GATE_25M BIT(5)
+#define ALX_PHY_CTRL_RVRS_ANEG BIT(4)
+#define ALX_PHY_CTRL_ANEG_NOW BIT(3)
+#define ALX_PHY_CTRL_LED_MODE BIT(2)
+#define ALX_PHY_CTRL_RTL_MODE BIT(1)
+/* bit0: out of dsp RST state */
+#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
+#define ALX_PHY_CTRL_DSPRST_TO 80
+#define ALX_PHY_CTRL_CLS (\
+ ALX_PHY_CTRL_LED_MODE |\
+ ALX_PHY_CTRL_100AB_EN |\
+ ALX_PHY_CTRL_PLL_ON)
+
+#define ALX_MAC_STS 0x1410
+#define ALX_MAC_STS_SFORCE_MASK 0xFUL
+#define ALX_MAC_STS_SFORCE_SHIFT 14
+/* fix: was "BIT13" (undefined identifier) — must be the BIT() macro */
+#define ALX_MAC_STS_CALIB_DONE BIT(13)
+#define ALX_MAC_STS_CALIB_RES_MASK 0x1FUL
+#define ALX_MAC_STS_CALIB_RES_SHIFT 8
+#define ALX_MAC_STS_CALIBERR_MASK 0xFUL
+#define ALX_MAC_STS_CALIBERR_SHIFT 4
+#define ALX_MAC_STS_TXQ_BUSY BIT(3)
+#define ALX_MAC_STS_RXQ_BUSY BIT(2)
+#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
+#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
+#define ALX_MAC_STS_IDLE (\
+ ALX_MAC_STS_TXQ_BUSY |\
+ ALX_MAC_STS_RXQ_BUSY |\
+ ALX_MAC_STS_TXMAC_BUSY |\
+ ALX_MAC_STS_RXMAC_BUSY)
+
+#define ALX_MDIO 0x1414
+#define ALX_MDIO_MODE_EXT BIT(30) +#define ALX_MDIO_POST_READ BIT(29) +#define ALX_MDIO_AUTO_POLLING BIT(28) +#define ALX_MDIO_BUSY BIT(27) +#define ALX_MDIO_CLK_SEL_MASK 0x7UL +#define ALX_MDIO_CLK_SEL_SHIFT 24 +#define ALX_MDIO_CLK_SEL_25MD4 0 +#define ALX_MDIO_CLK_SEL_25MD6 2 +#define ALX_MDIO_CLK_SEL_25MD8 3 +#define ALX_MDIO_CLK_SEL_25MD10 4 +#define ALX_MDIO_CLK_SEL_25MD32 5 +#define ALX_MDIO_CLK_SEL_25MD64 6 +#define ALX_MDIO_CLK_SEL_25MD128 7 +#define ALX_MDIO_START BIT(23) +#define ALX_MDIO_SPRES_PRMBL BIT(22) +/* bit21: 1:read,0:write */ +#define ALX_MDIO_OP_READ BIT(21) +#define ALX_MDIO_REG_MASK 0x1FUL +#define ALX_MDIO_REG_SHIFT 16 +#define ALX_MDIO_DATA_MASK 0xFFFFUL +#define ALX_MDIO_DATA_SHIFT 0 +#define ALX_MDIO_MAX_AC_TO 120 + +#define ALX_MDIO_EXTN 0x1448 +#define ALX_MDIO_EXTN_PORTAD_MASK 0x1FUL +#define ALX_MDIO_EXTN_PORTAD_SHIFT 21 +#define ALX_MDIO_EXTN_DEVAD_MASK 0x1FUL +#define ALX_MDIO_EXTN_DEVAD_SHIFT 16 +#define ALX_MDIO_EXTN_REG_MASK 0xFFFFUL +#define ALX_MDIO_EXTN_REG_SHIFT 0 + +#define ALX_PHY_STS 0x1418 +#define ALX_PHY_STS_LPW BIT(31) +#define ALX_PHY_STS_LPI BIT(30) +#define ALX_PHY_STS_PWON_STRIP_MASK 0xFFFUL +#define ALX_PHY_STS_PWON_STRIP_SHIFT 16 + +#define ALX_PHY_STS_DUPLEX BIT(3) +#define ALX_PHY_STS_LINKUP BIT(2) +#define ALX_PHY_STS_SPEED_MASK 0x3UL +#define ALX_PHY_STS_SPEED_SHIFT 0 +#define ALX_PHY_STS_SPEED_1000M 2 +#define ALX_PHY_STS_SPEED_100M 1 +#define ALX_PHY_STS_SPEED_10M 0 + +#define ALX_BIST0 0x141C +#define ALX_BIST0_COL_MASK 0x3FUL +#define ALX_BIST0_COL_SHIFT 24 +#define ALX_BIST0_ROW_MASK 0xFFFUL +#define ALX_BIST0_ROW_SHIFT 12 +#define ALX_BIST0_STEP_MASK 0xFUL +#define ALX_BIST0_STEP_SHIFT 8 +#define ALX_BIST0_PATTERN_MASK 0x7UL +#define ALX_BIST0_PATTERN_SHIFT 4 +#define ALX_BIST0_CRIT BIT(3) +#define ALX_BIST0_FIXED BIT(2) +#define ALX_BIST0_FAIL BIT(1) +#define ALX_BIST0_START BIT(0) + +#define ALX_BIST1 0x1420 +#define ALX_BIST1_COL_MASK 0x3FUL +#define ALX_BIST1_COL_SHIFT 24 +#define 
ALX_BIST1_ROW_MASK 0xFFFUL +#define ALX_BIST1_ROW_SHIFT 12 +#define ALX_BIST1_STEP_MASK 0xFUL +#define ALX_BIST1_STEP_SHIFT 8 +#define ALX_BIST1_PATTERN_MASK 0x7UL +#define ALX_BIST1_PATTERN_SHIFT 4 +#define ALX_BIST1_CRIT BIT(3) +#define ALX_BIST1_FIXED BIT(2) +#define ALX_BIST1_FAIL BIT(1) +#define ALX_BIST1_START BIT(0) + +#define ALX_SERDES 0x1424 +#define ALX_SERDES_PHYCLK_SLWDWN BIT(18) +#define ALX_SERDES_MACCLK_SLWDWN BIT(17) +#define ALX_SERDES_SELFB_PLL_MASK 0x3UL +#define ALX_SERDES_SELFB_PLL_SHIFT 14 +/* bit13: 1:gtx_clk, 0:25M */ +#define ALX_SERDES_PHYCLK_SEL_GTX BIT(13) +/* bit12: 1:serdes,0:25M */ +#define ALX_SERDES_PCIECLK_SEL_SRDS BIT(12) +#define ALX_SERDES_BUFS_RX_EN BIT(11) +#define ALX_SERDES_PD_RX BIT(10) +#define ALX_SERDES_PLL_EN BIT(9) +#define ALX_SERDES_EN BIT(8) +/* bit6: 0:state-machine,1:csr */ +#define ALX_SERDES_SELFB_PLL_SEL_CSR BIT(6) +#define ALX_SERDES_SELFB_PLL_CSR_MASK 0x3UL +#define ALX_SERDES_SELFB_PLL_CSR_SHIFT 4 +/* 4-12% OV-CLK */ +#define ALX_SERDES_SELFB_PLL_CSR_4 3 +/* 0-4% OV-CLK */ +#define ALX_SERDES_SELFB_PLL_CSR_0 2 +/* 12-18% OV-CLK */ +#define ALX_SERDES_SELFB_PLL_CSR_12 1 +/* 18-25% OV-CLK */ +#define ALX_SERDES_SELFB_PLL_CSR_18 0 +#define ALX_SERDES_VCO_SLOW BIT(3) +#define ALX_SERDES_VCO_FAST BIT(2) +#define ALX_SERDES_LOCKDCT_EN BIT(1) +#define ALX_SERDES_LOCKDCTED BIT(0) + +#define ALX_LED_CTRL 0x1428 +#define ALX_LED_CTRL_PATMAP2_MASK 0x3UL +#define ALX_LED_CTRL_PATMAP2_SHIFT 8 +#define ALX_LED_CTRL_PATMAP1_MASK 0x3UL +#define ALX_LED_CTRL_PATMAP1_SHIFT 6 +#define ALX_LED_CTRL_PATMAP0_MASK 0x3UL +#define ALX_LED_CTRL_PATMAP0_SHIFT 4 +#define ALX_LED_CTRL_D3_MODE_MASK 0x3UL +#define ALX_LED_CTRL_D3_MODE_SHIFT 2 +#define ALX_LED_CTRL_D3_MODE_NORMAL 0 +#define ALX_LED_CTRL_D3_MODE_WOL_DIS 1 +#define ALX_LED_CTRL_D3_MODE_WOL_ANY 2 +#define ALX_LED_CTRL_D3_MODE_WOL_EN 3 +#define ALX_LED_CTRL_DUTY_CYCL_MASK 0x3UL +#define ALX_LED_CTRL_DUTY_CYCL_SHIFT 0 +/* 50% */ +#define ALX_LED_CTRL_DUTY_CYCL_50 0 +/* 12.5% 
*/ +#define ALX_LED_CTRL_DUTY_CYCL_125 1 +/* 25% */ +#define ALX_LED_CTRL_DUTY_CYCL_25 2 +/* 75% */ +#define ALX_LED_CTRL_DUTY_CYCL_75 3 + +#define ALX_LED_PATN 0x142C +#define ALX_LED_PATN1_MASK 0xFFFFUL +#define ALX_LED_PATN1_SHIFT 16 +#define ALX_LED_PATN0_MASK 0xFFFFUL +#define ALX_LED_PATN0_SHIFT 0 + +#define ALX_LED_PATN2 0x1430 +#define ALX_LED_PATN2_MASK 0xFFFFUL +#define ALX_LED_PATN2_SHIFT 0 + +#define ALX_SYSALV 0x1434 +#define ALX_SYSALV_FLAG BIT(0) + +#define ALX_PCIERR_INST 0x1438 +#define ALX_PCIERR_INST_TX_RATE_MASK 0xFUL +#define ALX_PCIERR_INST_TX_RATE_SHIFT 4 +#define ALX_PCIERR_INST_RX_RATE_MASK 0xFUL +#define ALX_PCIERR_INST_RX_RATE_SHIFT 0 + +#define ALX_LPI_DECISN_TIMER 0x143C + +#define ALX_LPI_CTRL 0x1440 +#define ALX_LPI_CTRL_CHK_DA BIT(31) +#define ALX_LPI_CTRL_ENH_TO_MASK 0x1FFFUL +#define ALX_LPI_CTRL_ENH_TO_SHIFT 12 +#define ALX_LPI_CTRL_ENH_TH_MASK 0x1FUL +#define ALX_LPI_CTRL_ENH_TH_SHIFT 6 +#define ALX_LPI_CTRL_ENH_EN BIT(5) +#define ALX_LPI_CTRL_CHK_RX BIT(4) +#define ALX_LPI_CTRL_CHK_STATE BIT(3) +#define ALX_LPI_CTRL_GMII BIT(2) +#define ALX_LPI_CTRL_TO_PHY BIT(1) +#define ALX_LPI_CTRL_EN BIT(0) + +#define ALX_LPI_WAIT 0x1444 +#define ALX_LPI_WAIT_TIMER_MASK 0xFFFFUL +#define ALX_LPI_WAIT_TIMER_SHIFT 0 + +/* heart-beat, for swoi/cifs */ +#define ALX_HRTBT_VLAN 0x1450 +#define ALX_HRTBT_VLANID_MASK 0xFFFFUL +#define ALX_HRRBT_VLANID_SHIFT 0 + +#define ALX_HRTBT_CTRL 0x1454 +#define ALX_HRTBT_CTRL_EN BIT(31) +#define ALX_HRTBT_CTRL_PERIOD_MASK 0x3FUL +#define ALX_HRTBT_CTRL_PERIOD_SHIFT 25 +#define ALX_HRTBT_CTRL_HASVLAN BIT(24) +#define ALX_HRTBT_CTRL_HDRADDR_MASK 0xFFFUL +#define ALX_HRTBT_CTRL_HDRADDR_SHIFT 12 +#define ALX_HRTBT_CTRL_HDRADDRB0_MASK 0x7FFUL +#define ALX_HRTBT_CTRL_HDRADDRB0_SHIFT 13 +#define ALX_HRTBT_CTRL_PKT_FRAG BIT(12) +#define ALX_HRTBT_CTRL_PKTLEN_MASK 0xFFFUL +#define ALX_HRTBT_CTRL_PKTLEN_SHIFT 0 + +/* for B0+, bit[13..] 
for C0+ */ +#define ALX_HRTBT_EXT_CTRL 0x1AD0 +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3FUL +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24 +#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13) +#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12) +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFFUL +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4 +#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3) +#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2) +#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1) +#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0) + +#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4 +#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478 +#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8 +#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC +#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0 +#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4 + +/* 1B8C ~ 1B94 for C0+ */ +#define ALX_SWOI_ACER_CTRL 0x1B8C +#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20) +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFFUL +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12 +#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFFUL +#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_2 0x1B90 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFFUL +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFFUL +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFFUL +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_3 0x1B94 
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFFUL +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFFUL +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFFUL +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0 + +/*SWOI_HOST_IPV6_ADDR reuse reg1a60-1a6c, 1a70-1a7c, 1aa0-1aac, 1ab0-1abc.*/ +#define ALX_HRTBT_WAKEUP_PORT 0x1AE8 +#define ALX_HRTBT_WAKEUP_PORT_SRC_MASK 0xFFFFUL +#define ALX_HRTBT_WAKEUP_PORT_SRC_SHIFT 16 +#define ALX_HRTBT_WAKEUP_PORT_DEST_MASK 0xFFFFUL +#define ALX_HRTBT_WAKEUP_PORT_DEST_SHIFT 0 + +#define ALX_HRTBT_WAKEUP_DATA7 0x1AEC +#define ALX_HRTBT_WAKEUP_DATA6 0x1AF0 +#define ALX_HRTBT_WAKEUP_DATA5 0x1AF4 +#define ALX_HRTBT_WAKEUP_DATA4 0x1AF8 +#define ALX_HRTBT_WAKEUP_DATA3 0x1AFC +#define ALX_HRTBT_WAKEUP_DATA2 0x1B80 +#define ALX_HRTBT_WAKEUP_DATA1 0x1B84 +#define ALX_HRTBT_WAKEUP_DATA0 0x1B88 + +#define ALX_RXPARSE 0x1458 +#define ALX_RXPARSE_FLT6_L4_MASK 0x3UL +#define ALX_RXPARSE_FLT6_L4_SHIFT 30 +#define ALX_RXPARSE_FLT6_L3_MASK 0x3UL +#define ALX_RXPARSE_FLT6_L3_SHIFT 28 +#define ALX_RXPARSE_FLT5_L4_MASK 0x3UL +#define ALX_RXPARSE_FLT5_L4_SHIFT 26 +#define ALX_RXPARSE_FLT5_L3_MASK 0x3UL +#define ALX_RXPARSE_FLT5_L3_SHIFT 24 +#define ALX_RXPARSE_FLT4_L4_MASK 0x3UL +#define ALX_RXPARSE_FLT4_L4_SHIFT 22 +#define ALX_RXPARSE_FLT4_L3_MASK 0x3UL +#define ALX_RXPARSE_FLT4_L3_SHIFT 20 +#define ALX_RXPARSE_FLT3_L4_MASK 0x3UL +#define ALX_RXPARSE_FLT3_L4_SHIFT 18 +#define ALX_RXPARSE_FLT3_L3_MASK 0x3UL +#define ALX_RXPARSE_FLT3_L3_SHIFT 16 +#define ALX_RXPARSE_FLT2_L4_MASK 0x3UL +#define ALX_RXPARSE_FLT2_L4_SHIFT 14 +#define ALX_RXPARSE_FLT2_L3_MASK 0x3UL +#define ALX_RXPARSE_FLT2_L3_SHIFT 12 +#define ALX_RXPARSE_FLT1_L4_MASK 0x3UL +#define ALX_RXPARSE_FLT1_L4_SHIFT 10 +#define ALX_RXPARSE_FLT1_L3_MASK 0x3UL +#define ALX_RXPARSE_FLT1_L3_SHIFT 8 +#define ALX_RXPARSE_FLT6_EN BIT(5) +#define ALX_RXPARSE_FLT5_EN BIT(4) 
+#define ALX_RXPARSE_FLT4_EN BIT(3) +#define ALX_RXPARSE_FLT3_EN BIT(2) +#define ALX_RXPARSE_FLT2_EN BIT(1) +#define ALX_RXPARSE_FLT1_EN BIT(0) +#define ALX_RXPARSE_FLT_L4_UDP 0 +#define ALX_RXPARSE_FLT_L4_TCP 1 +#define ALX_RXPARSE_FLT_L4_BOTH 2 +#define ALX_RXPARSE_FLT_L4_NONE 3 +#define ALX_RXPARSE_FLT_L3_IPV6 0 +#define ALX_RXPARSE_FLT_L3_IPV4 1 +#define ALX_RXPARSE_FLT_L3_BOTH 2 + +/* Teredo support */ +#define ALX_TRD_CTRL 0x145C +#define ALX_TRD_CTRL_EN BIT(31) +#define ALX_TRD_CTRL_BUBBLE_WAKE_EN BIT(30) +#define ALX_TRD_CTRL_PREFIX_CMP_HW BIT(28) +#define ALX_TRD_CTRL_RSHDR_ADDR_MASK 0xFFFUL +#define ALX_TRD_CTRL_RSHDR_ADDR_SHIFT 16 +#define ALX_TRD_CTRL_SINTV_MAX_MASK 0xFFUL +#define ALX_TRD_CTRL_SINTV_MAX_SHIFT 8 +#define ALX_TRD_CTRL_SINTV_MIN_MASK 0xFFUL +#define ALX_TRD_CTRL_SINTV_MIN_SHIFT 0 + +#define ALX_TRD_RS 0x1460 +#define ALX_TRD_RS_SZ_MASK 0xFFFUL +#define ALX_TRD_RS_SZ_SHIFT 20 +#define ALX_TRD_RS_NONCE_OFS_MASK 0xFFFUL +#define ALX_TRD_RS_NONCE_OFS_SHIFT 8 +#define ALX_TRD_RS_SEQ_OFS_MASK 0xFFUL +#define ALX_TRD_RS_SEQ_OFS_SHIFT 0 + +#define ALX_TRD_SRV_IP4 0x1464 + +#define ALX_TRD_CLNT_EXTNL_IP4 0x1468 + +#define ALX_TRD_PORT 0x146C +#define ALX_TRD_PORT_CLNT_EXTNL_MASK 0xFFFFUL +#define ALX_TRD_PORT_CLNT_EXTNL_SHIFT 16 +#define ALX_TRD_PORT_SRV_MASK 0xFFFFUL +#define ALX_TRD_PORT_SRV_SHIFT 0 + +#define ALX_TRD_PREFIX 0x1470 + +#define ALX_TRD_BUBBLE_DA_IP4 0x1478 + +#define ALX_TRD_BUBBLE_DA_PORT 0x147C + +/* for B0 */ +#define ALX_IDLE_DECISN_TIMER 0x1474 +/* 1ms */ +#define ALX_IDLE_DECISN_TIMER_DEF 0x400 + + +#define ALX_MAC_CTRL 0x1480 +#define ALX_MAC_CTRL_FAST_PAUSE BIT(31) +#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30) +/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/ +#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29) +#define ALX_MAC_CTRL_SPAUSE_EN BIT(28) +#define ALX_MAC_CTRL_DBG_EN BIT(27) +#define ALX_MAC_CTRL_BRD_EN BIT(26) +#define ALX_MAC_CTRL_MULTIALL_EN BIT(25) +#define ALX_MAC_CTRL_RX_XSUM_EN BIT(24) +#define ALX_MAC_CTRL_THUGE BIT(23)
+#define ALX_MAC_CTRL_MBOF BIT(22) +#define ALX_MAC_CTRL_SPEED_MASK 0x3UL +#define ALX_MAC_CTRL_SPEED_SHIFT 20 +#define ALX_MAC_CTRL_SPEED_10_100 1 +#define ALX_MAC_CTRL_SPEED_1000 2 +#define ALX_MAC_CTRL_SIMR BIT(19) +#define ALX_MAC_CTRL_SSTCT BIT(17) +#define ALX_MAC_CTRL_TPAUSE BIT(16) +#define ALX_MAC_CTRL_PROMISC_EN BIT(15) +#define ALX_MAC_CTRL_VLANSTRIP BIT(14) +#define ALX_MAC_CTRL_PRMBLEN_MASK 0xFUL +#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10 +#define ALX_MAC_CTRL_RHUGE_EN BIT(9) +#define ALX_MAC_CTRL_FLCHK BIT(8) +#define ALX_MAC_CTRL_PCRCE BIT(7) +#define ALX_MAC_CTRL_CRCE BIT(6) +#define ALX_MAC_CTRL_FULLD BIT(5) +#define ALX_MAC_CTRL_LPBACK_EN BIT(4) +#define ALX_MAC_CTRL_RXFC_EN BIT(3) +#define ALX_MAC_CTRL_TXFC_EN BIT(2) +#define ALX_MAC_CTRL_RX_EN BIT(1) +#define ALX_MAC_CTRL_TX_EN BIT(0) + +#define ALX_GAP 0x1484 +#define ALX_GAP_IPGR2_MASK 0x7FUL +#define ALX_GAP_IPGR2_SHIFT 24 +#define ALX_GAP_IPGR1_MASK 0x7FUL +#define ALX_GAP_IPGR1_SHIFT 16 +#define ALX_GAP_MIN_IFG_MASK 0xFFUL +#define ALX_GAP_MIN_IFG_SHIFT 8 +#define ALX_GAP_IPGT_MASK 0x7FUL +#define ALX_GAP_IPGT_SHIFT 0 + +#define ALX_STAD0 0x1488 +#define ALX_STAD1 0x148C + +#define ALX_HASH_TBL0 0x1490 +#define ALX_HASH_TBL1 0x1494 + +#define ALX_HALFD 0x1498 +#define ALX_HALFD_JAM_IPG_MASK 0xFUL +#define ALX_HALFD_JAM_IPG_SHIFT 24 +#define ALX_HALFD_ABEBT_MASK 0xFUL +#define ALX_HALFD_ABEBT_SHIFT 20 +#define ALX_HALFD_ABEBE BIT(19) +#define ALX_HALFD_BPNB BIT(18) +#define ALX_HALFD_NOBO BIT(17) +#define ALX_HALFD_EDXSDFR BIT(16) +#define ALX_HALFD_RETRY_MASK 0xFUL +#define ALX_HALFD_RETRY_SHIFT 12 +#define ALX_HALFD_LCOL_MASK 0x3FFUL +#define ALX_HALFD_LCOL_SHIFT 0 + +#define ALX_MTU 0x149C +#define ALX_MTU_JUMBO_TH 1514 +#define ALX_MTU_STD_ALGN 1536 +#define ALX_MTU_MIN 64 + +#define ALX_SRAM0 0x1500 +#define ALX_SRAM_RFD_TAIL_ADDR_MASK 0xFFFUL +#define ALX_SRAM_RFD_TAIL_ADDR_SHIFT 16 +#define ALX_SRAM_RFD_HEAD_ADDR_MASK 0xFFFUL +#define ALX_SRAM_RFD_HEAD_ADDR_SHIFT 0 + +#define ALX_SRAM1 
0x1510 +#define ALX_SRAM_RFD_LEN_MASK 0xFFFUL +#define ALX_SRAM_RFD_LEN_SHIFT 0 + +#define ALX_SRAM2 0x1518 +#define ALX_SRAM_TRD_TAIL_ADDR_MASK 0xFFFUL +#define ALX_SRAM_TRD_TAIL_ADDR_SHIFT 16 +#define ALX_SRMA_TRD_HEAD_ADDR_MASK 0xFFFUL +#define ALX_SRAM_TRD_HEAD_ADDR_SHIFT 0 + +#define ALX_SRAM3 0x151C +#define ALX_SRAM_TRD_LEN_MASK 0xFFFUL +#define ALX_SRAM_TRD_LEN_SHIFT 0 + +#define ALX_SRAM4 0x1520 +#define ALX_SRAM_RXF_TAIL_ADDR_MASK 0xFFFUL +#define ALX_SRAM_RXF_TAIL_ADDR_SHIFT 16 +#define ALX_SRAM_RXF_HEAD_ADDR_MASK 0xFFFUL +#define ALX_SRAM_RXF_HEAD_ADDR_SHIFT 0 + +#define ALX_SRAM5 0x1524 +#define ALX_SRAM_RXF_LEN_MASK 0xFFFUL +#define ALX_SRAM_RXF_LEN_SHIFT 0 +#define ALX_SRAM_RXF_LEN_8K (8*1024) + +#define ALX_SRAM6 0x1528 +#define ALX_SRAM_TXF_TAIL_ADDR_MASK 0xFFFUL +#define ALX_SRAM_TXF_TAIL_ADDR_SHIFT 16 +#define ALX_SRAM_TXF_HEAD_ADDR_MASK 0xFFFUL +#define ALX_SRAM_TXF_HEAD_ADDR_SHIFT 0 + +#define ALX_SRAM7 0x152C +#define ALX_SRAM_TXF_LEN_MASK 0xFFFUL +#define ALX_SRAM_TXF_LEN_SHIFT 0 + +#define ALX_SRAM8 0x1530 +#define ALX_SRAM_PATTERN_ADDR_MASK 0xFFFUL +#define ALX_SRAM_PATTERN_ADDR_SHIFT 16 +#define ALX_SRAM_TSO_ADDR_MASK 0xFFFUL +#define ALX_SRAM_TSO_ADDR_SHIFT 0 + +#define ALX_SRAM9 0x1534 +#define ALX_SRAM_LOAD_PTR BIT(0) + +#define ALX_RX_BASE_ADDR_HI 0x1540 + +#define ALX_TX_BASE_ADDR_HI 0x1544 + +#define ALX_RFD_ADDR_LO 0x1550 +#define ALX_RFD_RING_SZ 0x1560 +#define ALX_RFD_BUF_SZ 0x1564 +#define ALX_RFD_BUF_SZ_MASK 0xFFFFUL +#define ALX_RFD_BUF_SZ_SHIFT 0 + +#define ALX_RRD_ADDR_LO 0x1568 +#define ALX_RRD_RING_SZ 0x1578 +#define ALX_RRD_RING_SZ_MASK 0xFFFUL +#define ALX_RRD_RING_SZ_SHIFT 0 + +/* pri3: highest, pri0: lowest */ +#define ALX_TPD_PRI3_ADDR_LO 0x14E4 +#define ALX_TPD_PRI2_ADDR_LO 0x14E0 +#define ALX_TPD_PRI1_ADDR_LO 0x157C +#define ALX_TPD_PRI0_ADDR_LO 0x1580 + +/* producer index is 16bit */ +#define ALX_TPD_PRI3_PIDX 0x1618 +#define ALX_TPD_PRI2_PIDX 0x161A +#define ALX_TPD_PRI1_PIDX 0x15F0 +#define ALX_TPD_PRI0_PIDX 
0x15F2 + +/* consumer index is 16bit */ +#define ALX_TPD_PRI3_CIDX 0x161C +#define ALX_TPD_PRI2_CIDX 0x161E +#define ALX_TPD_PRI1_CIDX 0x15F4 +#define ALX_TPD_PRI0_CIDX 0x15F6 + +#define ALX_TPD_RING_SZ 0x1584 +#define ALX_TPD_RING_SZ_MASK 0xFFFFUL +#define ALX_TPD_RING_SZ_SHIFT 0 + +#define ALX_CMB_ADDR_LO 0x1588 + +#define ALX_TXQ0 0x1590 +#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFFUL +#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16 +#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200 +#define ALX_TXQ0_PEDING_CLR BIT(8) +#define ALX_TXQ0_LSO_8023_EN BIT(7) +#define ALX_TXQ0_MODE_ENHANCE BIT(6) +#define ALX_TXQ0_EN BIT(5) +#define ALX_TXQ0_SUPT_IPOPT BIT(4) +#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xFUL +#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0 +#define ALX_TXQ_TPD_BURSTPREF_DEF 5 + +#define ALX_TXQ1 0x1594 +/* bit11: drop large packet, len > (rfd buf) */ +#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11) +/* bit[9:0]: 8bytes unit */ +#define ALX_TXQ1_JUMBO_TSOTHR_MASK 0x7FFUL +#define ALX_TXQ1_JUMBO_TSOTHR_SHIFT 0 +#define ALX_TXQ1_JUMBO_TSO_TH (7*1024) + +/* L1 entrance control */ +#define ALX_TXQ2 0x1598 +#define ALX_TXQ2_BURST_EN BIT(31) +#define ALX_TXQ2_BURST_HI_WM_MASK 0xFFFUL +#define ALX_TXQ2_BURST_HI_WM_SHIFT 16 +#define ALX_TXQ2_BURST_LO_WM_MASK 0xFFFUL +#define ALX_TXQ2_BURST_LO_WM_SHIFT 0 + +#define ALX_RXQ0 0x15A0 +#define ALX_RXQ0_EN BIT(31) +#define ALX_RXQ0_CUT_THRU_EN BIT(30) +#define ALX_RXQ0_RSS_HASH_EN BIT(29) +/* bit28: 0:goto Q0, 1:as table */ +#define ALX_RXQ0_NON_IP_QTBL BIT(28) +#define ALX_RXQ0_RSS_MODE_MASK 0x3UL +#define ALX_RXQ0_RSS_MODE_SHIFT 26 +#define ALX_RXQ0_RSS_MODE_DIS 0 +#define ALX_RXQ0_RSS_MODE_SQSI 1 +#define ALX_RXQ0_RSS_MODE_MQSI 2 +#define ALX_RXQ0_RSS_MODE_MQMI 3 +#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3FUL +#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20 +#define ALX_RXQ0_NUM_RFD_PREF_DEF 8 +#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FFUL +#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8 +#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100 +#define ALX_RXQ0_IPV6_PARSE_EN BIT(7) +#define 
ALX_RXQ0_RSS_HSTYP_MASK 0xFUL +#define ALX_RXQ0_RSS_HSTYP_SHIFT 2 +#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5) +#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4) +#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3) +#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2) +#define ALX_RXQ0_RSS_HSTYP_ALL (\ + ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN |\ + ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN |\ + ALX_RXQ0_RSS_HSTYP_IPV6_EN |\ + ALX_RXQ0_RSS_HSTYP_IPV4_EN) +#define ALX_RXQ0_ASPM_THRESH_MASK 0x3UL +#define ALX_RXQ0_ASPM_THRESH_SHIFT 0 +#define ALX_RXQ0_ASPM_THRESH_NO 0 +#define ALX_RXQ0_ASPM_THRESH_1M 1 +#define ALX_RXQ0_ASPM_THRESH_10M 2 +#define ALX_RXQ0_ASPM_THRESH_100M 3 + +#define ALX_RXQ1 0x15A4 +/* 32bytes unit */ +#define ALX_RXQ1_JUMBO_LKAH_MASK 0xFUL +#define ALX_RXQ1_JUMBO_LKAH_SHIFT 12 +#define ALX_RXQ1_RFD_PREF_DOWN_MASK 0x3FUL +#define ALX_RXQ1_RFD_PREF_DOWN_SHIFT 6 +#define ALX_RXQ1_RFD_PREF_UP_MASK 0x3FUL +#define ALX_RXQ1_RFD_PREF_UP_SHIFT 0 + +#define ALX_RXQ2 0x15A8 +/* XOFF: USED SRAM LOWER THAN IT, THEN NOTIFY THE PEER TO SEND AGAIN */ +#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFFUL +#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16 +#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFFUL +#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0 +/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) + + * rx-packet(1522) + delay-of-link(64) + * = 3212. 
+ */ +#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212 + +#define ALX_RXQ3 0x15AC +#define ALX_RXQ3_RXD_TIMER_MASK 0x7FFFUL +#define ALX_RXQ3_RXD_TIMER_SHIFT 16 +/* 8bytes unit */ +#define ALX_RXQ3_RXD_THRESH_MASK 0xFFFUL +#define ALX_RXQ3_RXD_THRESH_SHIFT 0 + +#define ALX_DMA 0x15C0 +#define ALX_DMA_SMB_NOW BIT(31) +#define ALX_DMA_WPEND_CLR BIT(30) +#define ALX_DMA_RPEND_CLR BIT(29) +#define ALX_DMA_WSRAM_RDCTRL BIT(28) +#define ALX_DMA_RCHNL_SEL_MASK 0x3UL +#define ALX_DMA_RCHNL_SEL_SHIFT 26 +#define ALX_DMA_RCHNL_SEL_1 0 +#define ALX_DMA_RCHNL_SEL_2 1 +#define ALX_DMA_RCHNL_SEL_3 2 +#define ALX_DMA_RCHNL_SEL_4 3 +#define ALX_DMA_SMB_EN BIT(21) +#define ALX_DMA_WDLY_CNT_MASK 0xFUL +#define ALX_DMA_WDLY_CNT_SHIFT 16 +#define ALX_DMA_WDLY_CNT_DEF 4 +#define ALX_DMA_RDLY_CNT_MASK 0x1FUL +#define ALX_DMA_RDLY_CNT_SHIFT 11 +#define ALX_DMA_RDLY_CNT_DEF 15 +/* bit10: 0:tpd with pri, 1: data */ +#define ALX_DMA_RREQ_PRI_DATA BIT(10) +#define ALX_DMA_WREQ_BLEN_MASK 0x7UL +#define ALX_DMA_WREQ_BLEN_SHIFT 7 +#define ALX_DMA_RREQ_BLEN_MASK 0x7UL +#define ALX_DMA_RREQ_BLEN_SHIFT 4 +#define ALX_DMA_PENDING_AUTO_RST BIT(3) +#define ALX_DMA_RORDER_MODE_MASK 0x7UL +#define ALX_DMA_RORDER_MODE_SHIFT 0 +#define ALX_DMA_RORDER_MODE_OUT 4 +#define ALX_DMA_RORDER_MODE_ENHANCE 2 +#define ALX_DMA_RORDER_MODE_IN 1 + +#define ALX_WOL0 0x14A0 +#define ALX_WOL0_PT7_MATCH BIT(31) +#define ALX_WOL0_PT6_MATCH BIT(30) +#define ALX_WOL0_PT5_MATCH BIT(29) +#define ALX_WOL0_PT4_MATCH BIT(28) +#define ALX_WOL0_PT3_MATCH BIT(27) +#define ALX_WOL0_PT2_MATCH BIT(26) +#define ALX_WOL0_PT1_MATCH BIT(25) +#define ALX_WOL0_PT0_MATCH BIT(24) +#define ALX_WOL0_PT7_EN BIT(23) +#define ALX_WOL0_PT6_EN BIT(22) +#define ALX_WOL0_PT5_EN BIT(21) +#define ALX_WOL0_PT4_EN BIT(20) +#define ALX_WOL0_PT3_EN BIT(19) +#define ALX_WOL0_PT2_EN BIT(18) +#define ALX_WOL0_PT1_EN BIT(17) +#define ALX_WOL0_PT0_EN BIT(16) +#define ALX_WOL0_IPV4_SYNC_EVT BIT(14) +#define ALX_WOL0_IPV6_SYNC_EVT BIT(13) +#define ALX_WOL0_LINK_EVT 
BIT(10) +#define ALX_WOL0_MAGIC_EVT BIT(9) +#define ALX_WOL0_PATTERN_EVT BIT(8) +#define ALX_WOL0_SWOI_EVT BIT(7) +#define ALX_WOL0_OOB_EN BIT(6) +#define ALX_WOL0_PME_LINK BIT(5) +#define ALX_WOL0_LINK_EN BIT(4) +#define ALX_WOL0_PME_MAGIC_EN BIT(3) +#define ALX_WOL0_MAGIC_EN BIT(2) +#define ALX_WOL0_PME_PATTERN_EN BIT(1) +#define ALX_WOL0_PATTERN_EN BIT(0) + +#define ALX_WOL1 0x14A4 +#define ALX_WOL1_PT3_LEN_MASK 0xFFUL +#define ALX_WOL1_PT3_LEN_SHIFT 24 +#define ALX_WOL1_PT2_LEN_MASK 0xFFUL +#define ALX_WOL1_PT2_LEN_SHIFT 16 +#define ALX_WOL1_PT1_LEN_MASK 0xFFUL +#define ALX_WOL1_PT1_LEN_SHIFT 8 +#define ALX_WOL1_PT0_LEN_MASK 0xFFUL +#define ALX_WOL1_PT0_LEN_SHIFT 0 + +#define ALX_WOL2 0x14A8 +#define ALX_WOL2_PT7_LEN_MASK 0xFFUL +#define ALX_WOL2_PT7_LEN_SHIFT 24 +#define ALX_WOL2_PT6_LEN_MASK 0xFFUL +#define ALX_WOL2_PT6_LEN_SHIFT 16 +#define ALX_WOL2_PT5_LEN_MASK 0xFFUL +#define ALX_WOL2_PT5_LEN_SHIFT 8 +#define ALX_WOL2_PT4_LEN_MASK 0xFFUL +#define ALX_WOL2_PT4_LEN_SHIFT 0 + +#define ALX_RFD_PIDX 0x15E0 +#define ALX_RFD_PIDX_MASK 0xFFFUL +#define ALX_RFD_PIDX_SHIFT 0 + +#define ALX_RFD_CIDX 0x15F8 +#define ALX_RFD_CIDX_MASK 0xFFFUL +#define ALX_RFD_CIDX_SHIFT 0 + +/* MIB */ +#define ALX_MIB_BASE 0x1700 +#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0) +#define ALX_MIB_RX_BC (ALX_MIB_BASE + 4) +#define ALX_MIB_RX_MC (ALX_MIB_BASE + 8) +#define ALX_MIB_RX_PAUSE (ALX_MIB_BASE + 12) +#define ALX_MIB_RX_CTRL (ALX_MIB_BASE + 16) +#define ALX_MIB_RX_FCS (ALX_MIB_BASE + 20) +#define ALX_MIB_RX_LENERR (ALX_MIB_BASE + 24) +#define ALX_MIB_RX_BYTCNT (ALX_MIB_BASE + 28) +#define ALX_MIB_RX_RUNT (ALX_MIB_BASE + 32) +#define ALX_MIB_RX_FRAGMENT (ALX_MIB_BASE + 36) +#define ALX_MIB_RX_64B (ALX_MIB_BASE + 40) +#define ALX_MIB_RX_127B (ALX_MIB_BASE + 44) +#define ALX_MIB_RX_255B (ALX_MIB_BASE + 48) +#define ALX_MIB_RX_511B (ALX_MIB_BASE + 52) +#define ALX_MIB_RX_1023B (ALX_MIB_BASE + 56) +#define ALX_MIB_RX_1518B (ALX_MIB_BASE + 60) +#define ALX_MIB_RX_SZMAX (ALX_MIB_BASE + 64) 
+#define ALX_MIB_RX_OVSZ (ALX_MIB_BASE + 68) +#define ALX_MIB_RXF_OV (ALX_MIB_BASE + 72) +#define ALX_MIB_RRD_OV (ALX_MIB_BASE + 76) +#define ALX_MIB_RX_ALIGN (ALX_MIB_BASE + 80) +#define ALX_MIB_RX_BCCNT (ALX_MIB_BASE + 84) +#define ALX_MIB_RX_MCCNT (ALX_MIB_BASE + 88) +#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92) +#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96) +#define ALX_MIB_TX_BC (ALX_MIB_BASE + 100) +#define ALX_MIB_TX_MC (ALX_MIB_BASE + 104) +#define ALX_MIB_TX_PAUSE (ALX_MIB_BASE + 108) +#define ALX_MIB_TX_EXCDEFER (ALX_MIB_BASE + 112) +#define ALX_MIB_TX_CTRL (ALX_MIB_BASE + 116) +#define ALX_MIB_TX_DEFER (ALX_MIB_BASE + 120) +#define ALX_MIB_TX_BYTCNT (ALX_MIB_BASE + 124) +#define ALX_MIB_TX_64B (ALX_MIB_BASE + 128) +#define ALX_MIB_TX_127B (ALX_MIB_BASE + 132) +#define ALX_MIB_TX_255B (ALX_MIB_BASE + 136) +#define ALX_MIB_TX_511B (ALX_MIB_BASE + 140) +#define ALX_MIB_TX_1023B (ALX_MIB_BASE + 144) +#define ALX_MIB_TX_1518B (ALX_MIB_BASE + 148) +#define ALX_MIB_TX_SZMAX (ALX_MIB_BASE + 152) +#define ALX_MIB_TX_1COL (ALX_MIB_BASE + 156) +#define ALX_MIB_TX_2COL (ALX_MIB_BASE + 160) +#define ALX_MIB_TX_LATCOL (ALX_MIB_BASE + 164) +#define ALX_MIB_TX_ABRTCOL (ALX_MIB_BASE + 168) +#define ALX_MIB_TX_UNDRUN (ALX_MIB_BASE + 172) +#define ALX_MIB_TX_TRDBEOP (ALX_MIB_BASE + 176) +#define ALX_MIB_TX_LENERR (ALX_MIB_BASE + 180) +#define ALX_MIB_TX_TRUNC (ALX_MIB_BASE + 184) +#define ALX_MIB_TX_BCCNT (ALX_MIB_BASE + 188) +#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192) +#define ALX_MIB_UPDATE (ALX_MIB_BASE + 196) + +#define ALX_RX_STATS_BIN ALX_MIB_RX_OK +#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR +#define ALX_TX_STATS_BIN ALX_MIB_TX_OK +#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT + +#define ALX_ISR 0x1600 +#define ALX_ISR_DIS BIT(31) +#define ALX_ISR_RX_Q7 BIT(30) +#define ALX_ISR_RX_Q6 BIT(29) +#define ALX_ISR_RX_Q5 BIT(28) +#define ALX_ISR_RX_Q4 BIT(27) +#define ALX_ISR_PCIE_LNKDOWN BIT(26) +#define ALX_ISR_PCIE_CERR BIT(25) +#define ALX_ISR_PCIE_NFERR BIT(24) +#define 
ALX_ISR_PCIE_FERR BIT(23) +#define ALX_ISR_PCIE_UR BIT(22) +#define ALX_ISR_MAC_TX BIT(21) +#define ALX_ISR_MAC_RX BIT(20) +#define ALX_ISR_RX_Q3 BIT(19) +#define ALX_ISR_RX_Q2 BIT(18) +#define ALX_ISR_RX_Q1 BIT(17) +#define ALX_ISR_RX_Q0 BIT(16) +#define ALX_ISR_TX_Q0 BIT(15) +#define ALX_ISR_TXQ_TO BIT(14) +#define ALX_ISR_PHY_LPW BIT(13) +#define ALX_ISR_PHY BIT(12) +#define ALX_ISR_TX_CREDIT BIT(11) +#define ALX_ISR_DMAW BIT(10) +#define ALX_ISR_DMAR BIT(9) +#define ALX_ISR_TXF_UR BIT(8) +#define ALX_ISR_TX_Q3 BIT(7) +#define ALX_ISR_TX_Q2 BIT(6) +#define ALX_ISR_TX_Q1 BIT(5) +#define ALX_ISR_RFD_UR BIT(4) +#define ALX_ISR_RXF_OV BIT(3) +#define ALX_ISR_MANU BIT(2) +#define ALX_ISR_TIMER BIT(1) +#define ALX_ISR_SMB BIT(0) + +#define ALX_IMR 0x1604 + +/* re-send assert msg if SW no response */ +#define ALX_INT_RETRIG 0x1608 +#define ALX_INT_RETRIG_TIMER_MASK 0xFFFFUL +#define ALX_INT_RETRIG_TIMER_SHIFT 0 +/* 40ms */ +#define ALX_INT_RETRIG_TO 20000 + +/* re-send deassert msg if SW no response */ +#define ALX_INT_DEASST_TIMER 0x1614 + +/* reg1620 used for sleep status */ +#define ALX_PATTERN_MASK 0x1620 +#define ALX_PATTERN_MASK_LEN 128 + + +#define ALX_FLT1_SRC_IP0 0x1A00 +#define ALX_FLT1_SRC_IP1 0x1A04 +#define ALX_FLT1_SRC_IP2 0x1A08 +#define ALX_FLT1_SRC_IP3 0x1A0C +#define ALX_FLT1_DST_IP0 0x1A10 +#define ALX_FLT1_DST_IP1 0x1A14 +#define ALX_FLT1_DST_IP2 0x1A18 +#define ALX_FLT1_DST_IP3 0x1A1C +#define ALX_FLT1_PORT 0x1A20 +#define ALX_FLT1_PORT_DST_MASK 0xFFFFUL +#define ALX_FLT1_PORT_DST_SHIFT 16 +#define ALX_FLT1_PORT_SRC_MASK 0xFFFFUL +#define ALX_FLT1_PORT_SRC_SHIFT 0 + +#define ALX_FLT2_SRC_IP0 0x1A24 +#define ALX_FLT2_SRC_IP1 0x1A28 +#define ALX_FLT2_SRC_IP2 0x1A2C +#define ALX_FLT2_SRC_IP3 0x1A30 +#define ALX_FLT2_DST_IP0 0x1A34 +#define ALX_FLT2_DST_IP1 0x1A38 +#define ALX_FLT2_DST_IP2 0x1A40 +#define ALX_FLT2_DST_IP3 0x1A44 +#define ALX_FLT2_PORT 0x1A48 +#define ALX_FLT2_PORT_DST_MASK 0xFFFFUL +#define ALX_FLT2_PORT_DST_SHIFT 16 +#define 
ALX_FLT2_PORT_SRC_MASK 0xFFFFUL +#define ALX_FLT2_PORT_SRC_SHIFT 0 + +#define ALX_FLT3_SRC_IP0 0x1A4C +#define ALX_FLT3_SRC_IP1 0x1A50 +#define ALX_FLT3_SRC_IP2 0x1A54 +#define ALX_FLT3_SRC_IP3 0x1A58 +#define ALX_FLT3_DST_IP0 0x1A5C +#define ALX_FLT3_DST_IP1 0x1A60 +#define ALX_FLT3_DST_IP2 0x1A64 +#define ALX_FLT3_DST_IP3 0x1A68 +#define ALX_FLT3_PORT 0x1A6C +#define ALX_FLT3_PORT_DST_MASK 0xFFFFUL +#define ALX_FLT3_PORT_DST_SHIFT 16 +#define ALX_FLT3_PORT_SRC_MASK 0xFFFFUL +#define ALX_FLT3_PORT_SRC_SHIFT 0 + +#define ALX_FLT4_SRC_IP0 0x1A70 +#define ALX_FLT4_SRC_IP1 0x1A74 +#define ALX_FLT4_SRC_IP2 0x1A78 +#define ALX_FLT4_SRC_IP3 0x1A7C +#define ALX_FLT4_DST_IP0 0x1A80 +#define ALX_FLT4_DST_IP1 0x1A84 +#define ALX_FLT4_DST_IP2 0x1A88 +#define ALX_FLT4_DST_IP3 0x1A8C +#define ALX_FLT4_PORT 0x1A90 +#define ALX_FLT4_PORT_DST_MASK 0xFFFFUL +#define ALX_FLT4_PORT_DST_SHIFT 16 +#define ALX_FLT4_PORT_SRC_MASK 0xFFFFUL +#define ALX_FLT4_PORT_SRC_SHIFT 0 + +#define ALX_FLT5_SRC_IP0 0x1A94 +#define ALX_FLT5_SRC_IP1 0x1A98 +#define ALX_FLT5_SRC_IP2 0x1A9C +#define ALX_FLT5_SRC_IP3 0x1AA0 +#define ALX_FLT5_DST_IP0 0x1AA4 +#define ALX_FLT5_DST_IP1 0x1AA8 +#define ALX_FLT5_DST_IP2 0x1AAC +#define ALX_FLT5_DST_IP3 0x1AB0 +#define ALX_FLT5_PORT 0x1AB4 +#define ALX_FLT5_PORT_DST_MASK 0xFFFFUL +#define ALX_FLT5_PORT_DST_SHIFT 16 +#define ALX_FLT5_PORT_SRC_MASK 0xFFFFUL +#define ALX_FLT5_PORT_SRC_SHIFT 0 + +#define ALX_FLT6_SRC_IP0 0x1AB8 +#define ALX_FLT6_SRC_IP1 0x1ABC +#define ALX_FLT6_SRC_IP2 0x1AC0 +#define ALX_FLT6_SRC_IP3 0x1AC8 +#define ALX_FLT6_DST_IP0 0x1620 +#define ALX_FLT6_DST_IP1 0x1624 +#define ALX_FLT6_DST_IP2 0x1628 +#define ALX_FLT6_DST_IP3 0x162C +#define ALX_FLT6_PORT 0x1630 +#define ALX_FLT6_PORT_DST_MASK 0xFFFFUL +#define ALX_FLT6_PORT_DST_SHIFT 16 +#define ALX_FLT6_PORT_SRC_MASK 0xFFFFUL +#define ALX_FLT6_PORT_SRC_SHIFT 0 + +#define ALX_FLTCTRL 0x1634 +#define ALX_FLTCTRL_PSTHR_TIMER_MASK 0xFFUL +#define ALX_FLTCTRL_PSTHR_TIMER_SHIFT 24 +#define 
ALX_FLTCTRL_CHK_DSTPRT6 BIT(23) +#define ALX_FLTCTRL_CHK_SRCPRT6 BIT(22) +#define ALX_FLTCTRL_CHK_DSTIP6 BIT(21) +#define ALX_FLTCTRL_CHK_SRCIP6 BIT(20) +#define ALX_FLTCTRL_CHK_DSTPRT5 BIT(19) +#define ALX_FLTCTRL_CHK_SRCPRT5 BIT(18) +#define ALX_FLTCTRL_CHK_DSTIP5 BIT(17) +#define ALX_FLTCTRL_CHK_SRCIP5 BIT(16) +#define ALX_FLTCTRL_CHK_DSTPRT4 BIT(15) +#define ALX_FLTCTRL_CHK_SRCPRT4 BIT(14) +#define ALX_FLTCTRL_CHK_DSTIP4 BIT(13) +#define ALX_FLTCTRL_CHK_SRCIP4 BIT(12) +#define ALX_FLTCTRL_CHK_DSTPRT3 BIT(11) +#define ALX_FLTCTRL_CHK_SRCPRT3 BIT(10) +#define ALX_FLTCTRL_CHK_DSTIP3 BIT(9) +#define ALX_FLTCTRL_CHK_SRCIP3 BIT(8) +#define ALX_FLTCTRL_CHK_DSTPRT2 BIT(7) +#define ALX_FLTCTRL_CHK_SRCPRT2 BIT(6) +#define ALX_FLTCTRL_CHK_DSTIP2 BIT(5) +#define ALX_FLTCTRL_CHK_SRCIP2 BIT(4) +#define ALX_FLTCTRL_CHK_DSTPRT1 BIT(3) +#define ALX_FLTCTRL_CHK_SRCPRT1 BIT(2) +#define ALX_FLTCTRL_CHK_DSTIP1 BIT(1) +#define ALX_FLTCTRL_CHK_SRCIP1 BIT(0) + +#define ALX_DROP_ALG1 0x1638 +#define ALX_DROP_ALG1_BWCHGVAL_MASK 0xFFFFFUL +#define ALX_DROP_ALG1_BWCHGVAL_SHIFT 12 +/* bit11: 0:3.125%, 1:6.25% */ +#define ALX_DROP_ALG1_BWCHGSCL_6 BIT(11) +#define ALX_DROP_ALG1_ASUR_LWQ_EN BIT(10) +#define ALX_DROP_ALG1_BWCHGVAL_EN BIT(9) +#define ALX_DROP_ALG1_BWCHGSCL_EN BIT(8) +#define ALX_DROP_ALG1_PSTHR_AUTO BIT(7) +#define ALX_DROP_ALG1_MIN_PSTHR_MASK 0x3UL +#define ALX_DROP_ALG1_MIN_PSTHR_SHIFT 5 +#define ALX_DROP_ALG1_MIN_PSTHR_1_16 0 +#define ALX_DROP_ALG1_MIN_PSTHR_1_8 1 +#define ALX_DROP_ALG1_MIN_PSTHR_1_4 2 +#define ALX_DROP_ALG1_MIN_PSTHR_1_2 3 +#define ALX_DROP_ALG1_PSCL_MASK 0x3UL +#define ALX_DROP_ALG1_PSCL_SHIFT 3 +#define ALX_DROP_ALG1_PSCL_1_4 0 +#define ALX_DROP_ALG1_PSCL_1_8 1 +#define ALX_DROP_ALG1_PSCL_1_16 2 +#define ALX_DROP_ALG1_PSCL_1_32 3 +#define ALX_DROP_ALG1_TIMESLOT_MASK 0x7UL +#define ALX_DROP_ALG1_TIMESLOT_SHIFT 0 +#define ALX_DROP_ALG1_TIMESLOT_4MS 0 +#define ALX_DROP_ALG1_TIMESLOT_8MS 1 +#define ALX_DROP_ALG1_TIMESLOT_16MS 2 +#define 
ALX_DROP_ALG1_TIMESLOT_32MS 3 +#define ALX_DROP_ALG1_TIMESLOT_64MS 4 +#define ALX_DROP_ALG1_TIMESLOT_128MS 5 +#define ALX_DROP_ALG1_TIMESLOT_256MS 6 +#define ALX_DROP_ALG1_TIMESLOT_512MS 7 + +#define ALX_DROP_ALG2 0x163C +#define ALX_DROP_ALG2_SMPLTIME_MASK 0xFUL +#define ALX_DROP_ALG2_SMPLTIME_SHIFT 24 +#define ALX_DROP_ALG2_LWQBW_MASK 0xFFFFFFUL +#define ALX_DROP_ALG2_LWQBW_SHIFT 0 + +#define ALX_SMB_TIMER 0x15C4 + +#define ALX_TINT_TPD_THRSHLD 0x15C8 + +#define ALX_TINT_TIMER 0x15CC + +#define ALX_CLK_GATE 0x1814 +/* bit[8:6]: for B0+ */ +#define ALX_CLK_GATE_125M_SW_DIS_CR BIT(8) +#define ALX_CLK_GATE_125M_SW_AZ BIT(7) +#define ALX_CLK_GATE_125M_SW_IDLE BIT(6) +#define ALX_CLK_GATE_RXMAC BIT(5) +#define ALX_CLK_GATE_TXMAC BIT(4) +#define ALX_CLK_GATE_RXQ BIT(3) +#define ALX_CLK_GATE_TXQ BIT(2) +#define ALX_CLK_GATE_DMAR BIT(1) +#define ALX_CLK_GATE_DMAW BIT(0) +#define ALX_CLK_GATE_ALL_A0 (\ + ALX_CLK_GATE_RXMAC |\ + ALX_CLK_GATE_TXMAC |\ + ALX_CLK_GATE_RXQ |\ + ALX_CLK_GATE_TXQ |\ + ALX_CLK_GATE_DMAR |\ + ALX_CLK_GATE_DMAW) +#define ALX_CLK_GATE_ALL_B0 (\ + ALX_CLK_GATE_ALL_A0) + +/* PORST affect */ +#define ALX_BTROM_CFG 0x1800 + +/* interop between drivers */ +#define ALX_DRV 0x1804 +#define ALX_DRV_PHY_AUTO BIT(28) +#define ALX_DRV_PHY_1000 BIT(27) +#define ALX_DRV_PHY_100 BIT(26) +#define ALX_DRV_PHY_10 BIT(25) +#define ALX_DRV_PHY_DUPLEX BIT(24) +/* bit23: adv Pause */ +#define ALX_DRV_PHY_PAUSE BIT(23) +/* bit22: adv Asym Pause */ +#define ALX_DRV_PHY_APAUSE BIT(22) +/* bit21: 1:en AZ */ +#define ALX_DRV_PHY_EEE BIT(21) +#define ALX_DRV_PHY_MASK 0xFFUL +#define ALX_DRV_PHY_SHIFT 21 +#define ALX_DRV_PHY_UNKNOWN 0 +#define ALX_DRV_DISABLE BIT(18) +#define ALX_DRV_WOLS5_EN BIT(17) +#define ALX_DRV_WOLS5_BIOS_EN BIT(16) +#define ALX_DRV_AZ_EN BIT(12) +#define ALX_DRV_WOLPATTERN_EN BIT(11) +#define ALX_DRV_WOLLINKUP_EN BIT(10) +#define ALX_DRV_WOLMAGIC_EN BIT(9) +#define ALX_DRV_WOLCAP_BIOS_EN BIT(8) +#define ALX_DRV_ASPM_SPD1000LMT_MASK 0x3UL +#define 
/* ALX_DRV_ASPM per-link-speed limit fields (continued from above).
 * NOTE(review): the "#define" keyword of the first entry sits on the
 * preceding (unseen) line of this chunk and is reconstructed here. */
#define ALX_DRV_ASPM_SPD1000LMT_SHIFT	4
#define ALX_DRV_ASPM_SPD1000LMT_100M	0
#define ALX_DRV_ASPM_SPD1000LMT_NO	1
#define ALX_DRV_ASPM_SPD1000LMT_1M	2
#define ALX_DRV_ASPM_SPD1000LMT_10M	3
#define ALX_DRV_ASPM_SPD100LMT_MASK	0x3UL
#define ALX_DRV_ASPM_SPD100LMT_SHIFT	2
#define ALX_DRV_ASPM_SPD100LMT_1M	0
#define ALX_DRV_ASPM_SPD100LMT_10M	1
#define ALX_DRV_ASPM_SPD100LMT_100M	2
#define ALX_DRV_ASPM_SPD100LMT_NO	3
#define ALX_DRV_ASPM_SPD10LMT_MASK	0x3UL
#define ALX_DRV_ASPM_SPD10LMT_SHIFT	0
#define ALX_DRV_ASPM_SPD10LMT_1M	0
#define ALX_DRV_ASPM_SPD10LMT_10M	1
#define ALX_DRV_ASPM_SPD10LMT_100M	2
#define ALX_DRV_ASPM_SPD10LMT_NO	3

/* flag of phy inited */
#define ALX_PHY_INITED			0x003F

/* PERST affect */
#define ALX_DRV_ERR1			0x1808
#define ALX_DRV_ERR1_GEN		BIT(31)
#define ALX_DRV_ERR1_NOR		BIT(30)
#define ALX_DRV_ERR1_TRUNC		BIT(29)
#define ALX_DRV_ERR1_RES		BIT(28)
#define ALX_DRV_ERR1_INTFATAL		BIT(27)
#define ALX_DRV_ERR1_TXQPEND		BIT(26)
#define ALX_DRV_ERR1_DMAW		BIT(25)
#define ALX_DRV_ERR1_DMAR		BIT(24)
#define ALX_DRV_ERR1_PCIELNKDWN		BIT(23)
#define ALX_DRV_ERR1_PKTSIZE		BIT(22)
#define ALX_DRV_ERR1_FIFOFUL		BIT(21)
#define ALX_DRV_ERR1_RFDUR		BIT(20)
#define ALX_DRV_ERR1_RRDSI		BIT(19)
#define ALX_DRV_ERR1_UPDATE		BIT(18)

#define ALX_DRV_ERR2			0x180C

#define ALX_DBG_ADDR			0x1900
#define ALX_DBG_DATA			0x1904

#define ALX_SYNC_IPV4_SA		0x1A00
#define ALX_SYNC_IPV4_DA		0x1A04

#define ALX_SYNC_V4PORT			0x1A08
#define ALX_SYNC_V4PORT_DST_MASK	0xFFFFUL
#define ALX_SYNC_V4PORT_DST_SHIFT	16
#define ALX_SYNC_V4PORT_SRC_MASK	0xFFFFUL
#define ALX_SYNC_V4PORT_SRC_SHIFT	0

#define ALX_SYNC_IPV6_SA0		0x1A0C
#define ALX_SYNC_IPV6_SA1		0x1A10
#define ALX_SYNC_IPV6_SA2		0x1A14
#define ALX_SYNC_IPV6_SA3		0x1A18
#define ALX_SYNC_IPV6_DA0		0x1A1C
#define ALX_SYNC_IPV6_DA1		0x1A20
#define ALX_SYNC_IPV6_DA2		0x1A24
#define ALX_SYNC_IPV6_DA3		0x1A28

#define ALX_SYNC_V6PORT			0x1A2C
#define ALX_SYNC_V6PORT_DST_MASK	0xFFFFUL
#define ALX_SYNC_V6PORT_DST_SHIFT	16
#define ALX_SYNC_V6PORT_SRC_MASK	0xFFFFUL
#define ALX_SYNC_V6PORT_SRC_SHIFT	0

#define ALX_ARP_REMOTE_IPV4		0x1A30
#define ALX_ARP_HOST_IPV4		0x1A34
#define ALX_ARP_MAC0			0x1A38
#define ALX_ARP_MAC1			0x1A3C

#define ALX_1ST_REMOTE_IPV6_0		0x1A40
#define ALX_1ST_REMOTE_IPV6_1		0x1A44
#define ALX_1ST_REMOTE_IPV6_2		0x1A48
#define ALX_1ST_REMOTE_IPV6_3		0x1A4C

#define ALX_1ST_SN_IPV6_0		0x1A50
#define ALX_1ST_SN_IPV6_1		0x1A54
#define ALX_1ST_SN_IPV6_2		0x1A58
#define ALX_1ST_SN_IPV6_3		0x1A5C

#define ALX_1ST_TAR_IPV6_1_0		0x1A60
#define ALX_1ST_TAR_IPV6_1_1		0x1A64
#define ALX_1ST_TAR_IPV6_1_2		0x1A68
#define ALX_1ST_TAR_IPV6_1_3		0x1A6C
#define ALX_1ST_TAR_IPV6_2_0		0x1A70
#define ALX_1ST_TAR_IPV6_2_1		0x1A74
#define ALX_1ST_TAR_IPV6_2_2		0x1A78
#define ALX_1ST_TAR_IPV6_2_3		0x1A7C

#define ALX_2ND_REMOTE_IPV6_0		0x1A80
#define ALX_2ND_REMOTE_IPV6_1		0x1A84
#define ALX_2ND_REMOTE_IPV6_2		0x1A88
#define ALX_2ND_REMOTE_IPV6_3		0x1A8C

#define ALX_2ND_SN_IPV6_0		0x1A90
#define ALX_2ND_SN_IPV6_1		0x1A94
#define ALX_2ND_SN_IPV6_2		0x1A98
#define ALX_2ND_SN_IPV6_3		0x1A9C

#define ALX_2ND_TAR_IPV6_1_0		0x1AA0
#define ALX_2ND_TAR_IPV6_1_1		0x1AA4
#define ALX_2ND_TAR_IPV6_1_2		0x1AA8
#define ALX_2ND_TAR_IPV6_1_3		0x1AAC
#define ALX_2ND_TAR_IPV6_2_0		0x1AB0
#define ALX_2ND_TAR_IPV6_2_1		0x1AB4
#define ALX_2ND_TAR_IPV6_2_2		0x1AB8
#define ALX_2ND_TAR_IPV6_2_3		0x1ABC

#define ALX_1ST_NS_MAC0			0x1AC0
#define ALX_1ST_NS_MAC1			0x1AC4

#define ALX_2ND_NS_MAC0			0x1AC8
#define ALX_2ND_NS_MAC1			0x1ACC

#define ALX_PMOFLD			0x144C
/* bit[11:10]: for B0+ */
#define ALX_PMOFLD_ECMA_IGNR_FRG_SSSR	BIT(11)
#define ALX_PMOFLD_ARP_CNFLCT_WAKEUP	BIT(10)
#define ALX_PMOFLD_MULTI_SOLD		BIT(9)
#define ALX_PMOFLD_ICMP_XSUM		BIT(8)
#define ALX_PMOFLD_GARP_REPLY		BIT(7)
#define ALX_PMOFLD_SYNCV6_ANY		BIT(6)
#define ALX_PMOFLD_SYNCV4_ANY		BIT(5)
#define ALX_PMOFLD_BY_HW		BIT(4)
#define ALX_PMOFLD_NS_EN		BIT(3)
#define ALX_PMOFLD_ARP_EN		BIT(2)
#define ALX_PMOFLD_SYNCV6_EN		BIT(1)
#define ALX_PMOFLD_SYNCV4_EN		BIT(0)

/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
#define ALX_WOL_CTRL2			0x1830
#define ALX_WOL_CTRL2_DATA_STORE	BIT(3)
#define ALX_WOL_CTRL2_PTRN_EVT		BIT(2)
#define ALX_WOL_CTRL2_PME_PTRN_EN	BIT(1)
#define ALX_WOL_CTRL2_PTRN_EN		BIT(0)

#define ALX_WOL_CTRL3			0x1834
#define ALX_WOL_CTRL3_PTRN_ADDR_MASK	0xFFFFFUL
#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT	0

#define ALX_WOL_CTRL4			0x1838
#define ALX_WOL_CTRL4_PT15_MATCH	BIT(31)
#define ALX_WOL_CTRL4_PT14_MATCH	BIT(30)
#define ALX_WOL_CTRL4_PT13_MATCH	BIT(29)
#define ALX_WOL_CTRL4_PT12_MATCH	BIT(28)
#define ALX_WOL_CTRL4_PT11_MATCH	BIT(27)
#define ALX_WOL_CTRL4_PT10_MATCH	BIT(26)
#define ALX_WOL_CTRL4_PT9_MATCH		BIT(25)
#define ALX_WOL_CTRL4_PT8_MATCH		BIT(24)
#define ALX_WOL_CTRL4_PT7_MATCH		BIT(23)
#define ALX_WOL_CTRL4_PT6_MATCH		BIT(22)
#define ALX_WOL_CTRL4_PT5_MATCH		BIT(21)
#define ALX_WOL_CTRL4_PT4_MATCH		BIT(20)
#define ALX_WOL_CTRL4_PT3_MATCH		BIT(19)
#define ALX_WOL_CTRL4_PT2_MATCH		BIT(18)
#define ALX_WOL_CTRL4_PT1_MATCH		BIT(17)
#define ALX_WOL_CTRL4_PT0_MATCH		BIT(16)
#define ALX_WOL_CTRL4_PT15_EN		BIT(15)
#define ALX_WOL_CTRL4_PT14_EN		BIT(14)
#define ALX_WOL_CTRL4_PT13_EN		BIT(13)
#define ALX_WOL_CTRL4_PT12_EN		BIT(12)
#define ALX_WOL_CTRL4_PT11_EN		BIT(11)
#define ALX_WOL_CTRL4_PT10_EN		BIT(10)
#define ALX_WOL_CTRL4_PT9_EN		BIT(9)
#define ALX_WOL_CTRL4_PT8_EN		BIT(8)
#define ALX_WOL_CTRL4_PT7_EN		BIT(7)
#define ALX_WOL_CTRL4_PT6_EN		BIT(6)
#define ALX_WOL_CTRL4_PT5_EN		BIT(5)
#define ALX_WOL_CTRL4_PT4_EN		BIT(4)
#define ALX_WOL_CTRL4_PT3_EN		BIT(3)
#define ALX_WOL_CTRL4_PT2_EN		BIT(2)
#define ALX_WOL_CTRL4_PT1_EN		BIT(1)
#define ALX_WOL_CTRL4_PT0_EN		BIT(0)

#define ALX_WOL_CTRL5			0x183C
#define ALX_WOL_CTRL5_PT3_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT3_LEN_SHIFT	24
#define ALX_WOL_CTRL5_PT2_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT2_LEN_SHIFT	16
#define ALX_WOL_CTRL5_PT1_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT1_LEN_SHIFT	8
#define ALX_WOL_CTRL5_PT0_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT0_LEN_SHIFT	0

/* NOTE(review): the pattern-length fields of WOL_CTRL6/7/8 below carry a
 * copy-pasted "ALX_WOL_CTRL5_" prefix.  The prefix is a misnomer, but the
 * names are kept as-is because other driver files may reference them. */
#define ALX_WOL_CTRL6			0x1840
#define ALX_WOL_CTRL5_PT7_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT7_LEN_SHIFT	24
#define ALX_WOL_CTRL5_PT6_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT6_LEN_SHIFT	16
#define ALX_WOL_CTRL5_PT5_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT5_LEN_SHIFT	8
#define ALX_WOL_CTRL5_PT4_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT4_LEN_SHIFT	0

#define ALX_WOL_CTRL7			0x1844
#define ALX_WOL_CTRL5_PT11_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT11_LEN_SHIFT	24
#define ALX_WOL_CTRL5_PT10_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT10_LEN_SHIFT	16
#define ALX_WOL_CTRL5_PT9_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT9_LEN_SHIFT	8
#define ALX_WOL_CTRL5_PT8_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT8_LEN_SHIFT	0

#define ALX_WOL_CTRL8			0x1848
#define ALX_WOL_CTRL5_PT15_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT15_LEN_SHIFT	24
#define ALX_WOL_CTRL5_PT14_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT14_LEN_SHIFT	16
#define ALX_WOL_CTRL5_PT13_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT13_LEN_SHIFT	8
#define ALX_WOL_CTRL5_PT12_LEN_MASK	0xFFUL
#define ALX_WOL_CTRL5_PT12_LEN_SHIFT	0

#define ALX_ACER_FIXED_PTN0		0x1850
#define ALX_ACER_FIXED_PTN0_MASK	0xFFFFFFFFUL
#define ALX_ACER_FIXED_PTN0_SHIFT	0

#define ALX_ACER_FIXED_PTN1		0x1854
#define ALX_ACER_FIXED_PTN1_MASK	0xFFFFUL
#define ALX_ACER_FIXED_PTN1_SHIFT	0

#define ALX_ACER_RANDOM_NUM0		0x1858
#define ALX_ACER_RANDOM_NUM0_MASK	0xFFFFFFFFUL
#define ALX_ACER_RANDOM_NUM0_SHIFT	0

#define ALX_ACER_RANDOM_NUM1		0x185C
#define ALX_ACER_RANDOM_NUM1_MASK	0xFFFFFFFFUL
#define ALX_ACER_RANDOM_NUM1_SHIFT	0

#define ALX_ACER_RANDOM_NUM2		0x1860
#define ALX_ACER_RANDOM_NUM2_MASK	0xFFFFFFFFUL
#define ALX_ACER_RANDOM_NUM2_SHIFT	0

#define ALX_ACER_RANDOM_NUM3		0x1864
#define ALX_ACER_RANDOM_NUM3_MASK	0xFFFFFFFFUL
#define ALX_ACER_RANDOM_NUM3_SHIFT	0

#define ALX_ACER_MAGIC			0x1868
#define ALX_ACER_MAGIC_EN		BIT(31)
#define ALX_ACER_MAGIC_PME_EN		BIT(30)
#define ALX_ACER_MAGIC_MATCH		BIT(29)
#define ALX_ACER_MAGIC_FF_CHECK		BIT(10)
#define ALX_ACER_MAGIC_RAN_LEN_MASK	0x1FUL
#define ALX_ACER_MAGIC_RAN_LEN_SHIFT	5
#define ALX_ACER_MAGIC_FIX_LEN_MASK	0x1FUL
#define ALX_ACER_MAGIC_FIX_LEN_SHIFT	0

#define ALX_ACER_TIMER			0x186C
#define ALX_ACER_TIMER_EN		BIT(31)
#define ALX_ACER_TIMER_PME_EN		BIT(30)
#define ALX_ACER_TIMER_MATCH		BIT(29)
#define ALX_ACER_TIMER_THRES_MASK	0x1FFFFUL
#define ALX_ACER_TIMER_THRES_SHIFT	0
#define ALX_ACER_TIMER_THRES_DEF	1

/* RSS definitions */
#define ALX_RSS_KEY0			0x14B0
#define ALX_RSS_KEY1			0x14B4
#define ALX_RSS_KEY2			0x14B8
#define ALX_RSS_KEY3			0x14BC
#define ALX_RSS_KEY4			0x14C0
#define ALX_RSS_KEY5			0x14C4
#define ALX_RSS_KEY6			0x14C8
#define ALX_RSS_KEY7			0x14CC
#define ALX_RSS_KEY8			0x14D0
#define ALX_RSS_KEY9			0x14D4

#define ALX_RSS_IDT_TBL0		0x1B00
#define ALX_RSS_IDT_TBL1		0x1B04
#define ALX_RSS_IDT_TBL2		0x1B08
#define ALX_RSS_IDT_TBL3		0x1B0C
#define ALX_RSS_IDT_TBL4		0x1B10
#define ALX_RSS_IDT_TBL5		0x1B14
#define ALX_RSS_IDT_TBL6		0x1B18
#define ALX_RSS_IDT_TBL7		0x1B1C
#define ALX_RSS_IDT_TBL8		0x1B20
#define ALX_RSS_IDT_TBL9		0x1B24
#define ALX_RSS_IDT_TBL10		0x1B28
#define ALX_RSS_IDT_TBL11		0x1B2C
#define ALX_RSS_IDT_TBL12		0x1B30
#define ALX_RSS_IDT_TBL13		0x1B34
#define ALX_RSS_IDT_TBL14		0x1B38
#define ALX_RSS_IDT_TBL15		0x1B3C
#define ALX_RSS_IDT_TBL16		0x1B40
#define ALX_RSS_IDT_TBL17		0x1B44
#define ALX_RSS_IDT_TBL18		0x1B48
#define ALX_RSS_IDT_TBL19		0x1B4C
#define ALX_RSS_IDT_TBL20		0x1B50
#define ALX_RSS_IDT_TBL21		0x1B54
#define ALX_RSS_IDT_TBL22		0x1B58
#define ALX_RSS_IDT_TBL23		0x1B5C
#define ALX_RSS_IDT_TBL24		0x1B60
#define ALX_RSS_IDT_TBL25		0x1B64
#define ALX_RSS_IDT_TBL26		0x1B68
#define ALX_RSS_IDT_TBL27		0x1B6C
#define ALX_RSS_IDT_TBL28		0x1B70
#define ALX_RSS_IDT_TBL29		0x1B74
#define ALX_RSS_IDT_TBL30		0x1B78
#define ALX_RSS_IDT_TBL31		0x1B7C

#define ALX_RSS_HASH_VAL		0x15B0
#define ALX_RSS_HASH_FLAG		0x15B4

#define ALX_RSS_BASE_CPU_NUM		0x15B8

#define ALX_MSI_MAP_TBL1		0x15D0
#define ALX_MSI_MAP_TBL1_ALERT_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_ALERT_SHIFT	28
#define ALX_MSI_MAP_TBL1_TIMER_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_TIMER_SHIFT	24
#define ALX_MSI_MAP_TBL1_TXQ1_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT	20
#define ALX_MSI_MAP_TBL1_TXQ0_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT	16
#define ALX_MSI_MAP_TBL1_RXQ3_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT	12
#define ALX_MSI_MAP_TBL1_RXQ2_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT	8
#define ALX_MSI_MAP_TBL1_RXQ1_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT	4
#define ALX_MSI_MAP_TBL1_RXQ0_MASK	0xFUL
#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT	0

#define ALX_MSI_MAP_TBL2		0x15D8
#define ALX_MSI_MAP_TBL2_PHY_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_PHY_SHIFT	28
#define ALX_MSI_MAP_TBL2_SMB_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_SMB_SHIFT	24
#define ALX_MSI_MAP_TBL2_TXQ3_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT	20
#define ALX_MSI_MAP_TBL2_TXQ2_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT	16
#define ALX_MSI_MAP_TBL2_RXQ7_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT	12
#define ALX_MSI_MAP_TBL2_RXQ6_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT	8
#define ALX_MSI_MAP_TBL2_RXQ5_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT	4
#define ALX_MSI_MAP_TBL2_RXQ4_MASK	0xFUL
#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT	0

#define ALX_MSI_ID_MAP			0x15D4
#define ALX_MSI_ID_MAP_RXQ7		BIT(30)
#define ALX_MSI_ID_MAP_RXQ6		BIT(29)
#define ALX_MSI_ID_MAP_RXQ5		BIT(28)
#define ALX_MSI_ID_MAP_RXQ4		BIT(27)
/* bit26: 0:common,1:timer */
#define ALX_MSI_ID_MAP_PCIELNKDW	BIT(26)
#define ALX_MSI_ID_MAP_PCIECERR		BIT(25)
#define ALX_MSI_ID_MAP_PCIENFERR	BIT(24)
#define ALX_MSI_ID_MAP_PCIEFERR		BIT(23)
#define ALX_MSI_ID_MAP_PCIEUR		BIT(22)
#define ALX_MSI_ID_MAP_MACTX		BIT(21)
#define ALX_MSI_ID_MAP_MACRX		BIT(20)
#define ALX_MSI_ID_MAP_RXQ3		BIT(19)
#define ALX_MSI_ID_MAP_RXQ2		BIT(18)
#define ALX_MSI_ID_MAP_RXQ1		BIT(17)
#define ALX_MSI_ID_MAP_RXQ0		BIT(16)
#define ALX_MSI_ID_MAP_TXQ0		BIT(15)
#define ALX_MSI_ID_MAP_TXQTO		BIT(14)
#define ALX_MSI_ID_MAP_LPW		BIT(13)
#define ALX_MSI_ID_MAP_PHY		BIT(12)
#define ALX_MSI_ID_MAP_TXCREDIT		BIT(11)
#define ALX_MSI_ID_MAP_DMAW		BIT(10)
#define ALX_MSI_ID_MAP_DMAR		BIT(9)
#define ALX_MSI_ID_MAP_TXFUR		BIT(8)
#define ALX_MSI_ID_MAP_TXQ3		BIT(7)
#define ALX_MSI_ID_MAP_TXQ2		BIT(6)
#define ALX_MSI_ID_MAP_TXQ1		BIT(5)
#define ALX_MSI_ID_MAP_RFDUR		BIT(4)
#define ALX_MSI_ID_MAP_RXFOV		BIT(3)
#define ALX_MSI_ID_MAP_MANU		BIT(2)
#define ALX_MSI_ID_MAP_TIMER		BIT(1)
#define ALX_MSI_ID_MAP_SMB		BIT(0)

#define ALX_MSI_RETRANS_TIMER		0x1920
/* bit16: 1:line,0:standard */
#define ALX_MSI_MASK_SEL_LINE		BIT(16)
#define ALX_MSI_RETRANS_TM_MASK		0xFFFFUL
#define ALX_MSI_RETRANS_TM_SHIFT	0

#define ALX_CR_DMA_CTRL			0x1930
#define ALX_CR_DMA_CTRL_PRI		BIT(22)
#define ALX_CR_DMA_CTRL_RRDRXD_JOINT	BIT(21)
#define ALX_CR_DMA_CTRL_BWCREDIT_MASK	0x3UL
#define ALX_CR_DMA_CTRL_BWCREDIT_SHIFT	19
#define ALX_CR_DMA_CTRL_BWCREDIT_2KB	0
#define ALX_CR_DMA_CTRL_BWCREDIT_1KB	1
#define ALX_CR_DMA_CTRL_BWCREDIT_4KB	2
#define ALX_CR_DMA_CTRL_BWCREDIT_8KB	3
#define ALX_CR_DMA_CTRL_BW_EN		BIT(18)
#define ALX_CR_DMA_CTRL_BW_RATIO_MASK	0x3UL
#define ALX_CR_DMA_CTRL_BW_RATIO_1_2	0
#define ALX_CR_DMA_CTRL_BW_RATIO_1_4	1
#define ALX_CR_DMA_CTRL_BW_RATIO_1_8	2
#define ALX_CR_DMA_CTRL_BW_RATIO_2_1	3
#define ALX_CR_DMA_CTRL_SOFT_RST	BIT(11)
#define ALX_CR_DMA_CTRL_TXEARLY_EN	BIT(10)
#define ALX_CR_DMA_CTRL_RXEARLY_EN	BIT(9)
#define ALX_CR_DMA_CTRL_WEARLY_EN	BIT(8)
#define ALX_CR_DMA_CTRL_RXTH_MASK	0xFUL
#define ALX_CR_DMA_CTRL_WTH_MASK	0xFUL


#define ALX_EFUSE_BIST			0x1934
#define ALX_EFUSE_BIST_COL_MASK		0x3FUL
#define ALX_EFUSE_BIST_COL_SHIFT	24
#define ALX_EFUSE_BIST_ROW_MASK		0x7FUL
#define ALX_EFUSE_BIST_ROW_SHIFT	12
#define ALX_EFUSE_BIST_STEP_MASK	0xFUL
#define ALX_EFUSE_BIST_STEP_SHIFT	8
#define ALX_EFUSE_BIST_PAT_MASK		0x7UL
#define ALX_EFUSE_BIST_PAT_SHIFT	4
#define ALX_EFUSE_BIST_CRITICAL		BIT(3)
#define ALX_EFUSE_BIST_FIXED		BIT(2)
#define ALX_EFUSE_BIST_FAIL		BIT(1)
#define ALX_EFUSE_BIST_NOW		BIT(0)

/* CR DMA ctrl */

/* TX QoS */
#define ALX_WRR				0x1938
#define ALX_WRR_PRI_MASK		0x3UL
#define ALX_WRR_PRI_SHIFT		29
#define ALX_WRR_PRI_RESTRICT_ALL	0
#define ALX_WRR_PRI_RESTRICT_HI		1
#define ALX_WRR_PRI_RESTRICT_HI2	2
#define ALX_WRR_PRI_RESTRICT_NONE	3
#define ALX_WRR_PRI3_MASK		0x1FUL
#define ALX_WRR_PRI3_SHIFT		24
#define ALX_WRR_PRI2_MASK		0x1FUL
#define ALX_WRR_PRI2_SHIFT		16
#define ALX_WRR_PRI1_MASK		0x1FUL
#define ALX_WRR_PRI1_SHIFT		8
#define ALX_WRR_PRI0_MASK		0x1FUL
#define ALX_WRR_PRI0_SHIFT		0

#define ALX_HQTPD			0x193C
#define ALX_HQTPD_BURST_EN		BIT(31)
#define ALX_HQTPD_Q3_NUMPREF_MASK	0xFUL
#define ALX_HQTPD_Q3_NUMPREF_SHIFT	8
#define ALX_HQTPD_Q2_NUMPREF_MASK	0xFUL
#define ALX_HQTPD_Q2_NUMPREF_SHIFT	4
#define ALX_HQTPD_Q1_NUMPREF_MASK	0xFUL
#define ALX_HQTPD_Q1_NUMPREF_SHIFT	0

#define ALX_CPUMAP1			0x19A0
#define ALX_CPUMAP1_VCT7_MASK		0xFUL
#define ALX_CPUMAP1_VCT7_SHIFT		28
#define ALX_CPUMAP1_VCT6_MASK		0xFUL
#define ALX_CPUMAP1_VCT6_SHIFT		24
#define ALX_CPUMAP1_VCT5_MASK		0xFUL
#define ALX_CPUMAP1_VCT5_SHIFT		20
#define ALX_CPUMAP1_VCT4_MASK		0xFUL
#define ALX_CPUMAP1_VCT4_SHIFT		16
#define ALX_CPUMAP1_VCT3_MASK		0xFUL
#define ALX_CPUMAP1_VCT3_SHIFT		12
#define ALX_CPUMAP1_VCT2_MASK		0xFUL
#define ALX_CPUMAP1_VCT2_SHIFT		8
#define ALX_CPUMAP1_VCT1_MASK		0xFUL
#define ALX_CPUMAP1_VCT1_SHIFT		4
#define ALX_CPUMAP1_VCT0_MASK		0xFUL
#define ALX_CPUMAP1_VCT0_SHIFT		0

#define ALX_CPUMAP2			0x19A4
#define ALX_CPUMAP2_VCT15_MASK		0xFUL
#define ALX_CPUMAP2_VCT15_SHIFT		28
#define ALX_CPUMAP2_VCT14_MASK		0xFUL
#define ALX_CPUMAP2_VCT14_SHIFT		24
#define ALX_CPUMAP2_VCT13_MASK		0xFUL
#define ALX_CPUMAP2_VCT13_SHIFT		20
#define ALX_CPUMAP2_VCT12_MASK		0xFUL
#define ALX_CPUMAP2_VCT12_SHIFT		16
#define ALX_CPUMAP2_VCT11_MASK		0xFUL
#define ALX_CPUMAP2_VCT11_SHIFT		12
#define ALX_CPUMAP2_VCT10_MASK		0xFUL
#define ALX_CPUMAP2_VCT10_SHIFT		8
#define ALX_CPUMAP2_VCT9_MASK		0xFUL
#define ALX_CPUMAP2_VCT9_SHIFT		4
#define ALX_CPUMAP2_VCT8_MASK		0xFUL
#define ALX_CPUMAP2_VCT8_SHIFT		0

#define ALX_MISC			0x19C0
/* bit31: 0:vector,1:cpu */
#define ALX_MISC_MODU			BIT(31)
#define ALX_MISC_OVERCUR		BIT(29)
#define ALX_MISC_PSWR_EN		BIT(28)
#define ALX_MISC_PSW_CTRL_MASK		0xFUL
#define ALX_MISC_PSW_CTRL_SHIFT		24
#define ALX_MISC_PSW_OCP_MASK		0x7UL
#define ALX_MISC_PSW_OCP_SHIFT		21
#define ALX_MISC_PSW_OCP_DEF		0x7
#define ALX_MISC_V18_HIGH		BIT(20)
#define ALX_MISC_LPO_CTRL_MASK		0xFUL
#define ALX_MISC_LPO_CTRL_SHIFT		16
#define ALX_MISC_ISO_EN			BIT(12)
#define ALX_MISC_XSTANA_ALWAYS_ON	BIT(11)
#define ALX_MISC_SYS25M_SEL_ADAPTIVE	BIT(10)
#define ALX_MISC_SPEED_SIM		BIT(9)
#define ALX_MISC_S1_LWP_EN		BIT(8)
/* bit7: pcie/mac do power saving while phy is in low-power state */
#define ALX_MISC_MACLPW			BIT(7)
#define ALX_MISC_125M_SW		BIT(6)
#define ALX_MISC_INTNLOSC_OFF_EN	BIT(5)
/* bit4: 0:chipset, 1:crystal */
#define ALX_MISC_EXTN25M_SEL		BIT(4)
#define ALX_MISC_INTNLOSC_OPEN		BIT(3)
#define ALX_MISC_SMBUS_AT_LED		BIT(2)
#define ALX_MISC_PPS_AT_LED_MASK	0x3UL
#define ALX_MISC_PPS_AT_LED_SHIFT	0
#define ALX_MISC_PPS_AT_LED_ACT		1
#define ALX_MISC_PPS_AT_LED_10_100	2
#define ALX_MISC_PPS_AT_LED_1000	3

#define ALX_MISC1			0x19C4
/* NOTE(review): "MSC1" / "MSIC2" below look like typos for MISC1/MISC2,
 * but the identifiers are kept unchanged to avoid breaking users. */
#define ALX_MSC1_BLK_CRASPM_REQ		BIT(15)

#define ALX_MSIC2			0x19C8
#define ALX_MSIC2_CALB_START		BIT(0)

#define ALX_MISC3			0x19CC
/* bit1: 1:Software control 25M */
#define ALX_MISC3_25M_BY_SW		BIT(1)
/* bit0: 25M switch to intnl OSC */
#define ALX_MISC3_25M_NOTO_INTNL	BIT(0)

/* MSIX tbl in memory space */
#define ALX_MSIX_ENTRY_BASE		0x2000

/***************************** IO mapping registers ***************************/
#define ALX_IO_ADDR			0x00
#define ALX_IO_DATA			0x04
/* same as reg1400 */
#define ALX_IO_MASTER			0x08
/* same as reg1480 */
#define ALX_IO_MAC_CTRL			0x0C
/* same as reg1600 */
#define ALX_IO_ISR			0x10
/* same as reg 1604 */
#define ALX_IO_IMR			0x14
/* word, same as reg15F0 */
#define ALX_IO_TPD_PRI1_PIDX		0x18
/* word, same as reg15F2 */
#define ALX_IO_TPD_PRI0_PIDX		0x1A
/* word, same as reg15F4 */
#define ALX_IO_TPD_PRI1_CIDX		0x1C
/* word, same as reg15F6 */
#define ALX_IO_TPD_PRI0_CIDX		0x1E
/* word, same as reg15E0 */
#define ALX_IO_RFD_PIDX			0x20
/* word, same as reg15F8 */
#define ALX_IO_RFD_CIDX			0x30
/* same as reg1414 */
#define ALX_IO_MDIO			0x38
/* same as reg140C */
#define ALX_IO_PHY_CTRL			0x3C


/********************* PHY regs definition ***************************/

/* Autoneg Advertisement Register */
#define ALX_ADVERTISE_SPEED_MASK	0x01E0
#define ALX_ADVERTISE_DEFAULT_CAP	0x1DE0

/* 1000BASE-T Control Register (0x9) */
#define ALX_GIGA_CR_1000T_HD_CAPS	0x0100
#define ALX_GIGA_CR_1000T_FD_CAPS	0x0200
#define ALX_GIGA_CR_1000T_REPEATER_DTE	0x0400

#define ALX_GIGA_CR_1000T_MS_VALUE	0x0800

#define ALX_GIGA_CR_1000T_MS_ENABLE	0x1000

#define ALX_GIGA_CR_1000T_TEST_MODE_NORMAL	0x0000
#define ALX_GIGA_CR_1000T_TEST_MODE_1	0x2000
#define ALX_GIGA_CR_1000T_TEST_MODE_2	0x4000
#define ALX_GIGA_CR_1000T_TEST_MODE_3	0x6000
#define ALX_GIGA_CR_1000T_TEST_MODE_4	0x8000
#define ALX_GIGA_CR_1000T_SPEED_MASK	0x0300
#define ALX_GIGA_CR_1000T_DEFAULT_CAP	0x0300

/* 1000BASE-T Status Register */
#define ALX_MII_GIGA_SR			0x0A

/* PHY Specific Status Register */
#define ALX_MII_GIGA_PSSR		0x11
#define ALX_GIGA_PSSR_FC_RXEN		0x0004
#define ALX_GIGA_PSSR_FC_TXEN		0x0008
#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED	0x0800
#define ALX_GIGA_PSSR_DPLX		0x2000
#define ALX_GIGA_PSSR_SPEED		0xC000
#define ALX_GIGA_PSSR_10MBS		0x0000
#define ALX_GIGA_PSSR_100MBS		0x4000
#define ALX_GIGA_PSSR_1000MBS		0x8000

/* PHY Interrupt Enable Register */
#define ALX_MII_IER			0x12
#define ALX_IER_LINK_UP			0x0400
#define ALX_IER_LINK_DOWN		0x0800

/* PHY Interrupt Status Register */
#define ALX_MII_ISR			0x13
#define ALX_ISR_LINK_UP			0x0400
#define ALX_ISR_LINK_DOWN		0x0800

/* Cable-Detect-Test Control Register */
#define ALX_MII_CDTC			0x16
/* self clear */
#define ALX_CDTC_EN			1
#define ALX_CDTC_PAIR_MASK		0x3U
#define ALX_CDTC_PAIR_SHIFT		8


/* Cable-Detect-Test Status Register */
#define ALX_MII_CDTS			0x1C
#define ALX_CDTS_STATUS_MASK		0x3U
#define ALX_CDTS_STATUS_SHIFT		8
#define ALX_CDTS_STATUS_NORMAL		0
#define ALX_CDTS_STATUS_SHORT		1
#define ALX_CDTS_STATUS_OPEN		2
#define ALX_CDTS_STATUS_INVALID		3

#define ALX_MII_DBG_ADDR		0x1D
#define ALX_MII_DBG_DATA		0x1E

/***************************** debug port *************************************/

#define ALX_MIIDBG_ANACTRL		0x00
#define ALX_ANACTRL_CLK125M_DELAY_EN	0x8000
#define ALX_ANACTRL_VCO_FAST		0x4000
#define ALX_ANACTRL_VCO_SLOW		0x2000
#define ALX_ANACTRL_AFE_MODE_EN		0x1000
#define ALX_ANACTRL_LCKDET_PHY		0x0800
#define ALX_ANACTRL_LCKDET_EN		0x0400
#define ALX_ANACTRL_OEN_125M		0x0200
#define ALX_ANACTRL_HBIAS_EN		0x0100
#define ALX_ANACTRL_HB_EN		0x0080
#define ALX_ANACTRL_SEL_HSP		0x0040
#define ALX_ANACTRL_CLASSA_EN		0x0020
#define ALX_ANACTRL_MANUSWON_SWR_MASK	0x3U
#define ALX_ANACTRL_MANUSWON_SWR_SHIFT	2
#define ALX_ANACTRL_MANUSWON_SWR_2V	0
#define ALX_ANACTRL_MANUSWON_SWR_1P9V	1
#define ALX_ANACTRL_MANUSWON_SWR_1P8V	2
#define ALX_ANACTRL_MANUSWON_SWR_1P7V	3
#define ALX_ANACTRL_MANUSWON_BW3_4M	0x0002
#define ALX_ANACTRL_RESTART_CAL		0x0001
#define ALX_ANACTRL_DEF			0x02EF


#define ALX_MIIDBG_SYSMODCTRL		0x04
#define ALX_SYSMODCTRL_IECHOADJ_PFMH_PHY	0x8000
#define ALX_SYSMODCTRL_IECHOADJ_BIASGEN		0x4000
#define ALX_SYSMODCTRL_IECHOADJ_PFML_PHY	0x2000
#define ALX_SYSMODCTRL_IECHOADJ_PS_MASK		0x3U
#define ALX_SYSMODCTRL_IECHOADJ_PS_SHIFT	10
#define ALX_SYSMODCTRL_IECHOADJ_PS_40		3
#define ALX_SYSMODCTRL_IECHOADJ_PS_20		2
#define ALX_SYSMODCTRL_IECHOADJ_PS_0		1
#define ALX_SYSMODCTRL_IECHOADJ_10BT_100MV	0x0040
#define ALX_SYSMODCTRL_IECHOADJ_HLFAP_MASK	0x3U
#define ALX_SYSMODCTRL_IECHOADJ_HLFAP_SHIFT	4
#define ALX_SYSMODCTRL_IECHOADJ_VDFULBW		0x0008
#define ALX_SYSMODCTRL_IECHOADJ_VDBIASHLF	0x0004
#define ALX_SYSMODCTRL_IECHOADJ_VDAMPHLF	0x0002
#define ALX_SYSMODCTRL_IECHOADJ_VDLANSW		0x0001
/* en half bias */
#define ALX_SYSMODCTRL_IECHOADJ_DEF		0xBB8B


#define ALX_MIIDBG_SRDSYSMOD		0x05
#define ALX_SRDSYSMOD_LCKDET_EN		0x2000
#define ALX_SRDSYSMOD_PLL_EN		0x0800
#define ALX_SRDSYSMOD_SEL_HSP		0x0400
#define ALX_SRDSYSMOD_HLFTXDR		0x0200
#define ALX_SRDSYSMOD_TXCLK_DELAY_EN	0x0100
#define ALX_SRDSYSMOD_TXELECIDLE	0x0080
#define ALX_SRDSYSMOD_DEEMP_EN		0x0040
#define ALX_SRDSYSMOD_MS_PAD		0x0004
#define ALX_SRDSYSMOD_CDR_ADC_VLTG	0x0002
#define ALX_SRDSYSMOD_CDR_DAC_1MA	0x0001
#define ALX_SRDSYSMOD_DEF		0x2C46


#define ALX_MIIDBG_HIBNEG		0x0B
#define ALX_HIBNEG_PSHIB_EN		0x8000
#define ALX_HIBNEG_WAKE_BOTH		0x4000
#define ALX_HIBNEG_ONOFF_ANACHG_SUDEN	0x2000
#define ALX_HIBNEG_HIB_PULSE		0x1000
#define ALX_HIBNEG_GATE_25M_EN		0x0800
#define ALX_HIBNEG_RST_80U		0x0400
#define ALX_HIBNEG_RST_TIMER_MASK	0x3U
#define ALX_HIBNEG_RST_TIMER_SHIFT	8
#define ALX_HIBNEG_GTX_CLK_DELAY_MASK	0x3U
#define ALX_HIBNEG_GTX_CLK_DELAY_SHIFT	5
#define ALX_HIBNEG_BYPSS_BRKTIMER	0x0010
#define ALX_HIBNEG_DEF			0xBC40
#define ALX_HIBNEG_NOHIB	\
	(ALX_HIBNEG_DEF & ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PULSE))

#define ALX_MIIDBG_TST10BTCFG		0x12
#define ALX_TST10BTCFG_INTV_TIMER_MASK		0x3U
#define ALX_TST10BTCFG_INTV_TIMER_SHIFT		14
#define ALX_TST10BTCFG_TRIGER_TIMER_MASK	0x3U
#define ALX_TST10BTCFG_TRIGER_TIMER_SHIFT	12
#define ALX_TST10BTCFG_DIV_MAN_MLT3_EN		0x0800
#define ALX_TST10BTCFG_OFF_DAC_IDLE		0x0400
#define ALX_TST10BTCFG_LPBK_DEEP		0x0004
#define ALX_TST10BTCFG_DEF			0x4C04

#define ALX_MIIDBG_AZ_ANADECT		0x15
#define ALX_AZ_ANADECT_10BTRX_TH	0x8000
#define ALX_AZ_ANADECT_BOTH_01CHNL	0x4000
#define ALX_AZ_ANADECT_INTV_MASK	0x3FU
#define ALX_AZ_ANADECT_INTV_SHIFT	8
#define ALX_AZ_ANADECT_THRESH_MASK	0xFU
#define ALX_AZ_ANADECT_THRESH_SHIFT	4
#define ALX_AZ_ANADECT_CHNL_MASK	0xFU
#define ALX_AZ_ANADECT_CHNL_SHIFT	0
#define ALX_AZ_ANADECT_DEF		0x3220
#define ALX_AZ_ANADECT_LONG		0x3210

#define ALX_MIIDBG_MSE16DB		0x18
#define ALX_MSE16DB_UP			0x05EA
#define ALX_MSE16DB_DOWN		0x02EA

#define ALX_MIIDBG_MSE20DB		0x1C
#define ALX_MSE20DB_TH_MASK		0x7F
#define ALX_MSE20DB_TH_SHIFT		2
#define ALX_MSE20DB_TH_DEF		0x2E
#define ALX_MSE20DB_TH_HI		0x54

#define ALX_MIIDBG_AGC			0x23
#define ALX_AGC_2_VGA_MASK		0x3FU
#define ALX_AGC_2_VGA_SHIFT		8
#define ALX_AGC_LONG1G_LIMT		40
#define ALX_AGC_LONG100M_LIMT		44

#define ALX_MIIDBG_LEGCYPS		0x29
#define ALX_LEGCYPS_EN			0x8000
#define ALX_LEGCYPS_DAC_AMP1000_MASK	0x7U
#define ALX_LEGCYPS_DAC_AMP1000_SHIFT	12
#define ALX_LEGCYPS_DAC_AMP100_MASK	0x7U
#define ALX_LEGCYPS_DAC_AMP100_SHIFT	9
#define ALX_LEGCYPS_DAC_AMP10_MASK	0x7U
#define ALX_LEGCYPS_DAC_AMP10_SHIFT	6
#define ALX_LEGCYPS_UNPLUG_TIMER_MASK	0x7U
#define ALX_LEGCYPS_UNPLUG_TIMER_SHIFT	3
#define ALX_LEGCYPS_UNPLUG_DECT_EN	0x0004
#define ALX_LEGCYPS_ECNC_PS_EN		0x0001
#define ALX_LEGCYPS_DEF			0x129D

#define ALX_MIIDBG_TST100BTCFG		0x36
#define ALX_TST100BTCFG_NORMAL_BW_EN		0x8000
#define ALX_TST100BTCFG_BADLNK_BYPASS		0x4000
#define ALX_TST100BTCFG_SHORTCABL_TH_MASK	0x3FU
#define ALX_TST100BTCFG_SHORTCABL_TH_SHIFT	8
#define ALX_TST100BTCFG_LITCH_EN		0x0080
#define ALX_TST100BTCFG_VLT_SW			0x0040
#define ALX_TST100BTCFG_LONGCABL_TH_MASK	0x3FU
#define ALX_TST100BTCFG_LONGCABL_TH_SHIFT	0
#define ALX_TST100BTCFG_DEF			0xE12C

#define ALX_MIIDBG_GREENCFG		0x3B
#define ALX_GREENCFG_MSTPS_MSETH2_MASK	0xFFU
#define ALX_GREENCFG_MSTPS_MSETH2_SHIFT	8
#define ALX_GREENCFG_MSTPS_MSETH1_MASK	0xFFU
#define ALX_GREENCFG_MSTPS_MSETH1_SHIFT	0
#define ALX_GREENCFG_DEF		0x7078

#define ALX_MIIDBG_GREENCFG2		0x3D
#define ALX_GREENCFG2_BP_GREEN		0x8000
+#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080 + + +/***************************** extension **************************************/ + +/******* dev 3 *********/ +#define ALX_MIIEXT_PCS 3 + +#define ALX_MIIEXT_CLDCTRL3 0x8003 +#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 +#define ALX_CLDCTRL3_AZ_DISAMP 0x1000 + +#define ALX_MIIEXT_CLDCTRL5 0x8005 +#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000 + +#define ALX_MIIEXT_CLDCTRL6 0x8006 +#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFFU +#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0 +#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116 +#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152 + +#define ALX_MIIEXT_CLDCTRL7 0x8007 +#define ALX_CLDCTRL7_VDHLF_BIAS_TH_MASK 0x7FU +#define ALX_CLDCTRL7_VDHLF_BIAS_TH_SHIFT 9 +#define ALX_CLDCTRL7_AFE_AZ_MASK 0x1FU +#define ALX_CLDCTRL7_AFE_AZ_SHIFT 4 +#define ALX_CLDCTRL7_SIDE_PEAK_TH_MASK 0xFU +#define ALX_CLDCTRL7_SIDE_PEAK_TH_SHIFT 0 +#define ALX_CLDCTRL7_DEF 0x6BF6 + +#define ALX_MIIEXT_AZCTRL 0x8008 +#define ALX_AZCTRL_SHORT_TH_MASK 0xFFU +#define ALX_AZCTRL_SHORT_TH_SHIFT 8 +#define ALX_AZCTRL_LONG_TH_MASK 0xFFU +#define ALX_AZCTRL_LONG_TH_SHIFT 0 +#define ALX_AZCTRL_DEF 0x1629 + +#define ALX_MIIEXT_AZCTRL2 0x8009 +#define ALX_AZCTRL2_WAKETRNING_MASK 0xFFU +#define ALX_AZCTRL2_WAKETRNING_SHIFT 8 +#define ALX_AZCTRL2_QUIET_TIMER_MASK 0x3U +#define ALX_AZCTRL2_QUIET_TIMER_SHIFT 6 +#define ALX_AZCTRL2_PHAS_JMP2 0x0010 +#define ALX_AZCTRL2_CLKTRCV_125MD16 0x0008 +#define ALX_AZCTRL2_GATE1000_EN 0x0004 +#define ALX_AZCTRL2_AVRG_FREQ 0x0002 +#define ALX_AZCTRL2_PHAS_JMP4 0x0001 +#define ALX_AZCTRL2_DEF 0x32C0 + +#define ALX_MIIEXT_AZCTRL6 0x800D + +#define ALX_MIIEXT_VDRVBIAS 0x8062 +#define ALX_VDRVBIAS_SEL_MASK 0x3U +#define ALX_VDRVBIAS_SEL_SHIFT 0 +#define ALX_VDRVBIAS_DEF 0x3 + +/********* dev 7 **********/ +#define ALX_MIIEXT_ANEG 7 + +#define ALX_MIIEXT_LOCAL_EEEADV 0x3C +#define ALX_LOCAL_EEEADV_1000BT 0x0004 +#define ALX_LOCAL_EEEADV_100BT 0x0002 + +#define ALX_MIIEXT_REMOTE_EEEADV 0x3D +#define 
ALX_REMOTE_EEEADV_1000BT 0x0004 +#define ALX_REMOTE_EEEADV_100BT 0x0002 + +#define ALX_MIIEXT_EEE_ANEG 0x8000 +#define ALX_EEE_ANEG_1000M 0x0004 +#define ALX_EEE_ANEG_100M 0x0002 + +#define ALX_MIIEXT_AFE 0x801A +#define ALX_AFE_10BT_100M_TH 0x0040 + +#define ALX_MIIEXT_S3DIG10 0x8023 +/* bit0: 1:bypass 10BT rx fifo, 0:riginal 10BT rx */ +#define ALX_MIIEXT_S3DIG10_SL 0x0001 +#define ALX_MIIEXT_S3DIG10_DEF 0 + +#define ALX_MIIEXT_NLP34 0x8025 +/* for 160m */ +#define ALX_MIIEXT_NLP34_DEF 0x1010 + +#define ALX_MIIEXT_NLP56 0x8026 +/* for 160m */ +#define ALX_MIIEXT_NLP56_DEF 0x1010 + +#define ALX_MIIEXT_NLP78 0x8027 +/* for 160m */ +#define ALX_MIIEXT_NLP78_160M_DEF 0x8D05 +#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05 + +#endif diff -Nurp linux-3.8.11/drivers/net/ethernet/atheros/alx/Makefile linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/Makefile --- linux-3.8.11/drivers/net/ethernet/atheros/alx/Makefile 1970-01-01 02:00:00.000000000 +0200 +++ linux-3.8.11-alx/drivers/net/ethernet/atheros/alx/Makefile 2013-05-05 13:51:31.000000000 +0300 @@ -0,0 +1,3 @@ +obj-$(CONFIG_ALX) += alx.o +alx-objs := alx_main.o alx_ethtool.o alx_hw.o +ccflags-y += -D__CHECK_ENDIAN__